~damien/infrastructure

602d799e88466efdbed14666280ed641a0ebc7ae — Damien Radtke 2 years ago b9a7a90
wip
M ca/consul-agent-ca.srl => ca/consul-agent-ca.srl +1 -1
@@ -1,1 +1,1 @@
2E741F16F5701C92061B3669C7546E4A1AA2C813
2E741F16F5701C92061B3669C7546E4A1AA2C820

A ca/nomad-agent-ca.srl => ca/nomad-agent-ca.srl +1 -0
@@ -0,0 +1,1 @@
3A95C8A7EE198C7858E58366940C5F258426061A

A ca/vault-server-ca.srl => ca/vault-server-ca.srl +1 -0
@@ -0,0 +1,1 @@
73BA2463646820941EB0EC0FDCDC6DE86EE141F7

M terraform/cluster/consul-server/main.tf => terraform/cluster/consul-server/main.tf +2 -1
@@ -2,7 +2,7 @@ terraform {
  required_providers {
    linode = {
      source  = "linode/linode"
      version = "1.19.1"
      version = "~> 1.19.1"
    }
  }
}
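
The pin loosens from an exact provider version to a pessimistic constraint. Under standard Terraform constraint semantics, a sketch of what the two forms allow:

    linode = {
      source  = "linode/linode"
      // "1.19.1" accepts exactly that release and nothing else;
      // "~> 1.19.1" accepts 1.19.1 plus any later 1.19.x patch release,
      // but not 1.20.0 or 2.0.0.
      version = "~> 1.19.1"
    }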


@@ -19,6 +19,7 @@ resource "linode_instance" "servers" {
  type             = random_id.servers[count.index].keepers.instance_type
  authorized_users = var.authorized_users
  group            = terraform.workspace
  tags            = [terraform.workspace]

  lifecycle {
    create_before_destroy = true

M terraform/cluster/consul-server/outputs.tf => terraform/cluster/consul-server/outputs.tf +2 -2
@@ -1,6 +1,6 @@
output "instances" {
output "ips" {
  description = "Consul server instances"
  value       = linode_instance.servers
  value       = [for ip in linode_instance.servers[*].ipv6 : split("/", ip)[0]]
}

// vim: set expandtab shiftwidth=2 tabstop=2:
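
The reworked output strips the prefix length that the Linode provider includes in each instance's ipv6 attribute. A minimal sketch of the transformation, using a made-up address:

    // linode_instance.servers[*].ipv6 yields CIDR-style strings such as
    // "2600:3c03::f03c:91ff:fe24:abcd/64" (illustrative address only);
    // split("/", ip)[0] keeps just the address portion.
    ips = [for ip in ["2600:3c03::f03c:91ff:fe24:abcd/64"] : split("/", ip)[0]]
    // => ["2600:3c03::f03c:91ff:fe24:abcd"]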

M terraform/cluster/main.tf => terraform/cluster/main.tf +9 -19
@@ -9,7 +9,7 @@ variable authorized_users { type = list(string) }
module "consul-server" {
  source = "./consul-server"

  servers        = 3
  servers        = 1

  datacenter       = var.datacenter
  image            = var.image


@@ -17,16 +17,13 @@ module "consul-server" {
  authorized_users = var.authorized_users
}

/*
module "nomad-server" {
  source = "./nomad-server"

  servers           = 1
  consul_version    = var.consul_version
  nomad_version     = var.nomad_version
  consul_server_ips = module.consul-server.instances[*].ipv6
  consul_server_ips = module.consul-server.ips

  datacenter       = var.region
  datacenter       = var.datacenter
  image            = var.image
  instance_type    = var.instance_type
  authorized_users = var.authorized_users


@@ -37,11 +34,9 @@ module "nomad-client" {
  source = "./nomad-client"

  clients           = 1
  consul_version    = var.consul_version
  nomad_version     = var.nomad_version
  consul_server_ips = module.consul-server.instances[*].ipv6
  consul_server_ips = module.consul-server.ips

  datacenter       = var.region
  datacenter       = var.datacenter
  image            = var.image
  instance_type    = var.instance_type
  authorized_users = var.authorized_users


@@ -51,12 +46,10 @@ module "nomad-client-load-balancer" {
  source = "./nomad-client"

  clients           = 1
  consul_version    = var.consul_version
  nomad_version     = var.nomad_version
  node_class        = "load-balancer"
  consul_server_ips = module.consul-server.instances[*].ipv6
  consul_server_ips = module.consul-server.ips

  datacenter       = var.region
  datacenter       = var.datacenter
  image            = var.image
  instance_type    = var.instance_type
  authorized_users = var.authorized_users


@@ -66,13 +59,10 @@ module "vault-server" {
  source = "./vault-server"

  servers           = 1
  consul_version    = var.consul_version
  vault_version     = var.vault_version
  consul_server_ips = module.consul-server.instances[*].ipv6
  consul_server_ips = module.consul-server.ips

  datacenter       = var.region
  datacenter       = var.datacenter
  image            = var.image
  instance_type    = var.instance_type
  authorized_users = var.authorized_users
}
*/
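
With the consul-server module now exposing clean addresses, the commented-out modules can consume them directly once re-enabled. The expression change at each call site, side by side:

    // before: raw instance attributes, each still carrying a prefix length
    consul_server_ips = module.consul-server.instances[*].ipv6

    // after: the module strips the prefix once, centrally
    consul_server_ips = module.consul-server.ips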

M terraform/cluster/nomad-client/main.tf => terraform/cluster/nomad-client/main.tf +57 -218
@@ -1,3 +1,13 @@
terraform {
  required_providers {
    linode = {
      source  = "linode/linode"
      version = "~> 1.19.1"
    }
  }
}

// TODO: use this
locals {
  extra_provisions_for_class = lookup({
    "load-balancer" = [


@@ -15,263 +25,80 @@ locals {
}

resource "linode_instance" "clients" {
  count  = var.clients
  label  = "nomadclient-${random_id.clients[count.index].keepers.datacenter}-${random_id.clients[count.index].hex}"
  region = random_id.clients[count.index].keepers.datacenter
  image  = random_id.clients[count.index].keepers.image
  type   = random_id.clients[count.index].keepers.instance_type
  // private_ip       = true
  count            = var.clients
  label            = "nomad-client-${random_id.clients[count.index].keepers.datacenter}-${random_id.clients[count.index].hex}"
  region           = random_id.clients[count.index].keepers.datacenter
  image            = random_id.clients[count.index].keepers.image
  type             = random_id.clients[count.index].keepers.instance_type
  authorized_users = var.authorized_users
  group            = terraform.workspace

  stackscript_id = var.stackscript_id
  stackscript_data = {
    hostname       = "nomadclient-${random_id.clients[count.index].keepers.datacenter}-${random_id.clients[count.index].hex}"
    consul_version = random_id.clients[count.index].keepers.consul_version
    nomad_version  = random_id.clients[count.index].keepers.nomad_version
  }
  tags            = [terraform.workspace]

  lifecycle {
    create_before_destroy = true
  }

  // wait for stackscript to complete
  provisioner "remote-exec" {
    connection { host = split("/", self.ipv6)[0] }
    inline = [
      "while ! [[ -f /root/StackScript.complete ]]; do echo 'waiting for stackscript to complete...'; sleep 3; done",
    ]
    inline = ["echo SSH is up"]
  }

  // systemd service files
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/systemd/system/"
    source      = "../services/"
  // Recognize the new server
  // This isn't _ideal_, but it's better than disabling host key checking for every SSH command.
  provisioner "local-exec" {
    command = "ssh-keygen -R '${self.ip_address}' && ssh-keyscan -v '${self.ip_address}' >> ~/.ssh/known_hosts"
  }

  // cfssl config
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/ssl/cfssl.json"
    content     = data.template_file.cfssl_config.rendered
  provisioner "local-exec" {
    command = "../ca/provision-cert --addr ${self.ip_address} --ca consul-agent --cn client.${self.region}.consul --owner consul:consul --outdir /etc/ssl/consul-agent --basename consul"
  }

  // consul config
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/consul.d"
    source      = "../config/consul"
  }

  // consul client config
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/consul.d/client.hcl"
    content     = <<-EOT
      node_name        = "${self.label}"
      datacenter       = "${var.datacenter}"
      server           = false
      retry_join       = [
        %{for ip in var.consul_server_ips~}
          "${split("/", ip)[0]}",
        %{endfor~}
      ]
    EOT
  }

  // nomad config
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/nomad.d"
    source      = "../config/nomad"
  provisioner "local-exec" {
    command = "../ca/provision-cert --addr ${self.ip_address} --ca nomad-agent --cn client.${self.region}.nomad --owner nomad:nomad --outdir /etc/ssl/nomad-agent --basename nomad"
  }

  // nomad client config
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/nomad.d/client.hcl"
    content     = <<-EOT
      datacenter = "${var.datacenter}"

      client {
        enabled = true

        # This is Nomad's default chroot + SSL certs.
        chroot_env {
          "/bin"            = "/bin"
          "/etc"            = "/etc"
          "/lib"            = "/lib"
          "/lib32"          = "/lib32"
          "/lib64"          = "/lib64"
          "/run/resolvconf" = "/run/resolvconf"
          "/sbin"           = "/sbin"
          "/usr"            = "/usr"
          # This is where SSL certs actually live on openSUSE. /etc/ssl/certs is symlinked to here
          "/var/lib/ca-certificates/pem" = "/var/lib/ca-certificates/pem"
        }

        node_class = "${var.node_class}"

        meta {
          %{for key, value in var.meta~}
          "${key}" = "${value}"
          %{endfor~}
        }
      }

      plugin "raw_exec" {
        config {
          enabled = true
        }
      datacenter       = "${var.datacenter}"
      server {
        enabled          = false
      }
    EOT
  }

  // firewall services
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/firewalld/services"
    source      = "../firewall/services/"
  }

  // firewall zones
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/firewalld/zones"
    source      = "../firewall/zones/"
  }

  // issue-cert script
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/usr/local/bin/issue-cert.sh"
    source      = "../scripts/issue-cert.sh"
  }

  // healthcheck script
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/usr/local/bin/healthcheck-nomad.sh"
    source      = "../scripts/healthcheck-nomad.sh"
  }

  // Consul certificate authority
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/ssl/consul/ca.pem"
    source      = "/etc/ssl/consul/ca.pem"
  }

  // Nomad certificate authority
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/ssl/nomad/ca.pem"
    source      = "/etc/ssl/nomad/ca.pem"
  }

  // Vault certificate authority
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/ssl/vault/ca.pem"
    source      = "/etc/ssl/vault/ca.pem"
  }

  // set global environment variables
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/profile.local"
    content     = <<-EOT
      export CONSUL_HTTP_ADDR=unix:///var/run/consul/consul_https.sock
      export NOMAD_ADDR=https://localhost:4646
      export NOMAD_CACERT=/etc/ssl/nomad/ca.pem
      export NOMAD_CLIENT_CERT=/etc/ssl/nomad/cli.pem
      export NOMAD_CLIENT_KEY=/etc/ssl/nomad/cli-key.pem
    EOT
  }

  // install additional base packages needed for running tasks
  provisioner "remote-exec" {
    connection { host = split("/", self.ipv6)[0] }
    inline = ["zypper --non-interactive install git docker"]
  }

  // reload firewall
  provisioner "remote-exec" {
    connection { host = split("/", self.ipv6)[0] }
    inline = ["service firewalld reload"]
  }

  // fix permissions
  provisioner "remote-exec" {
    connection { host = split("/", self.ipv6)[0] }
    inline = [
      "chown -R consul:consul /etc/consul.d",
      "chown -R nomad:nomad /etc/nomad.d",
      "chmod +x /usr/local/bin/issue-cert.sh",
      "chmod 0400 /etc/ssl/cfssl.json",
    ]
  }

  // issue certs
  provisioner "remote-exec" {
    connection { host = split("/", self.ipv6)[0] }
    inline = [
      "/usr/local/bin/issue-cert.sh --user consul --ca consul --name consul",
      "/usr/local/bin/issue-cert.sh --user nomad --ca nomad --name nomad --hostnames client.global.nomad",
      "/usr/local/bin/issue-cert.sh --user nomad --ca nomad --name cli",
      "/usr/local/bin/issue-cert.sh --user nomad --ca nomad --name nomad --hostnames client.global.nomad",
      "/usr/local/bin/issue-cert.sh --user nomad --ca consul --name consul",
      "/usr/local/bin/issue-cert.sh --user nomad --ca vault --name vault",
    ]
  }

  // fix CLI key permissions
  provisioner "remote-exec" {
    connection { host = split("/", self.ipv6)[0] }
    inline = [
      "chmod g+r /etc/ssl/nomad/cli-key.pem",
    ]
  }

  // start services
  provisioner "remote-exec" {
    connection { host = split("/", self.ipv6)[0] }
    inline = [
      "systemctl enable consul && service consul start",
      "systemctl enable nomad && service nomad start",
    ]
  }

  // install autocompletion
  provisioner "remote-exec" {
    connection { host = split("/", self.ipv6)[0] }
    inline = [
      "sudo -u damien /usr/local/bin/consul -autocomplete-install",
      "sudo -u damien /usr/local/bin/nomad -autocomplete-install",
    ]
  }

  // run extra provisions based on the node class
  provisioner "remote-exec" {
    connection { host = split("/", self.ipv6)[0] }
    inline = local.extra_provisions_for_class
    inline = ["systemctl enable consul && service consul start && systemctl enable nomad && service nomad start"]
  }

  // run healthcheck script to ensure the node comes up
  // set hostname
  provisioner "remote-exec" {
    connection { host = split("/", self.ipv6)[0] }
    inline = [
      "chmod +x /usr/local/bin/healthcheck-nomad.sh",
      "/usr/local/bin/healthcheck-nomad.sh",
    ]
    inline = ["hostnamectl set-hostname '${self.label}'"]
  }

  // disable further root ssh
  provisioner "remote-exec" {
    connection { host = split("/", self.ipv6)[0] }
    inline = [
      "sed -i 's/PermitRootLogin .*/PermitRootLogin no/' /etc/ssh/sshd_config",
      "service sshd reload",
      "cp /root/.ssh/authorized_keys /home/damien/.ssh/",
      "chown -R damien:users /home/damien/.ssh/",
      // TODO: re-disable root, but only after null resource provisioners have also run
      //"sed -i 's/PermitRootLogin .*/PermitRootLogin no/' /etc/ssh/sshd_config",
      //"service sshd reload",
    ]
  }
}
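
The client.hcl heredoc above uses Terraform's string-template directives to expand the server list inline. A sketch of how the %{for} block renders, assuming two illustrative addresses in var.consul_server_ips:

    retry_join       = [
      %{for ip in var.consul_server_ips~}
        "${split("/", ip)[0]}",
      %{endfor~}
    ]
    // With consul_server_ips = ["2600:3c03::1/64", "2600:3c03::2/64"]
    // (illustrative values), this renders roughly as:
    //   retry_join       = [
    //       "2600:3c03::1",
    //       "2600:3c03::2",
    //   ]
    // The ~ in %{for ...~} and %{endfor~} strips the newline after each
    // directive, keeping the rendered file compact.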


@@ -282,17 +109,29 @@ resource "random_id" "clients" {
    datacenter     = var.datacenter
    image          = var.image
    instance_type  = var.instance_type
    consul_version = var.consul_version
    nomad_version  = var.nomad_version
  }
  byte_length = 3
  byte_length = 4
}

data "template_file" "cfssl_config" {
  template = file("${path.module}/../../../config/cfssl.json")
  vars = {
    ca_host = var.ca_host
    ca_key  = var.ca_key
resource "null_resource" "cluster" {
  triggers = {
    ips = "${join(",", var.consul_server_ips)}"
  }

  count = length(linode_instance.clients.*.id)
  connection {
    host = split("/", linode_instance.clients[count.index].ipv6)[0]
  }

  provisioner "file" {
    destination = "/etc/consul.d/join.hcl"
    content = <<EOF
retry_join = [${join(", ", [for ip in var.consul_server_ips : format("\"%s\"", ip)])}]
EOF
  }

  provisioner "remote-exec" {
    inline = ["service consul restart"]
  }
}
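
Moving the join configuration into null_resource.cluster decouples it from the instance lifecycle: when the Consul server set changes, only this resource is replaced and re-provisioned, not the client machines themselves. A hypothetical, minimal form of the same pattern:

    resource "null_resource" "rejoin_sketch" {
      // The triggers map is persisted in state; any change to the joined
      // IP list forces replacement, which re-runs the provisioners below
      // while leaving the linode_instance resources untouched.
      triggers = {
        ips = join(",", var.consul_server_ips)
      }

      provisioner "local-exec" {
        command = "echo 'consul server set changed: ${self.triggers.ips}'"
      }
    }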


M terraform/cluster/nomad-client/variables.tf => terraform/cluster/nomad-client/variables.tf +0 -7
@@ -3,14 +3,7 @@ variable clients { type = number }
variable datacenter { type = string }
variable image { type = string }
variable instance_type { type = string }
variable stackscript_id { type = number }
variable authorized_users { type = list(string) }

variable ca_host { type = string }
variable ca_key { type = string }

variable consul_version { type = string }
variable nomad_version { type = string }
variable consul_server_ips { type = list(string) }

variable meta {

M terraform/cluster/nomad-server/main.tf => terraform/cluster/nomad-server/main.tf +51 -168
@@ -1,217 +1,88 @@
terraform {
  required_providers {
    linode = {
      source  = "linode/linode"
      version = "~> 1.19.1"
    }
  }
}

resource "linode_instance" "servers" {
  count            = var.servers
  label            = "nomadserver-${random_id.servers[count.index].keepers.datacenter}-${random_id.servers[count.index].hex}"
  label            = "nomad-server-${random_id.servers[count.index].keepers.datacenter}-${random_id.servers[count.index].hex}"
  region           = random_id.servers[count.index].keepers.datacenter
  image            = random_id.servers[count.index].keepers.image
  type             = random_id.servers[count.index].keepers.instance_type
  authorized_users = var.authorized_users
  group            = terraform.workspace

  stackscript_id = var.stackscript_id
  stackscript_data = {
    hostname       = "nomadserver-${random_id.servers[count.index].keepers.datacenter}-${random_id.servers[count.index].hex}"
    consul_version = random_id.servers[count.index].keepers.consul_version
    nomad_version  = random_id.servers[count.index].keepers.nomad_version
  }
  tags            = [terraform.workspace]

  lifecycle {
    create_before_destroy = true
  }

  // wait for stackscript to complete
  provisioner "remote-exec" {
    connection { host = split("/", self.ipv6)[0] }
    inline = [
      "while ! [[ -f /root/StackScript.complete ]]; do echo 'waiting for stackscript to complete...'; sleep 3; done",
    ]
    inline = ["echo SSH is up"]
  }

  // systemd service files
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/systemd/system/"
    source      = "../services/"
  // Recognize the new server
  // This isn't _ideal_, but it's better than disabling host key checking for every SSH command.
  provisioner "local-exec" {
    command = "ssh-keygen -R '${self.ip_address}' && ssh-keyscan -v '${self.ip_address}' >> ~/.ssh/known_hosts"
  }

  // cfssl config
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/ssl/cfssl.json"
    content     = data.template_file.cfssl_config.rendered
  }

  // consul config
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/consul.d"
    source      = "../config/consul"
  provisioner "local-exec" {
    command = "../ca/provision-cert --addr ${self.ip_address} --ca consul-agent --cn client.${self.region}.consul --owner consul:consul --outdir /etc/ssl/consul-agent --basename consul"
  }

  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/consul.d/client.hcl"
    content     = <<-EOT
      node_name        = "${self.label}"
      datacenter       = "${var.datacenter}"
      server           = false
      retry_join       = [
        %{for ip in var.consul_server_ips~}
          "${split("/", ip)[0]}",
        %{endfor~}
      ]
    EOT
  }

  // nomad config
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/nomad.d"
    source      = "../config/nomad"
  provisioner "local-exec" {
    command = "../ca/provision-cert --addr ${self.ip_address} --ca nomad-agent --cn server.${self.region}.nomad --owner nomad:nomad --outdir /etc/ssl/nomad-agent --basename nomad"
  }

  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/nomad.d/server.hcl"
    content     = <<-EOT
      datacenter = "${var.datacenter}"

      datacenter       = "${var.datacenter}"
      server {
        enabled = true
        enabled          = true
        bootstrap_expect = ${var.servers}
      }
    EOT
  }

  // firewall services
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/firewalld/services"
    source      = "../firewall/services/"
  }

  // firewall zones
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/firewalld/zones"
    source      = "../firewall/zones/"
  }

  // issue-cert script
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/usr/local/bin/issue-cert.sh"
    source      = "../scripts/issue-cert.sh"
  }

  // Consul certificate authority
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/ssl/consul/ca.pem"
    source      = "/etc/ssl/consul/ca.pem"
  }

  // Nomad certificate authority
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/ssl/nomad/ca.pem"
    source      = "/etc/ssl/nomad/ca.pem"
  }

  // Vault certificate authority
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/ssl/vault/ca.pem"
    source      = "/etc/ssl/vault/ca.pem"
  }

  // set global environment variables
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/profile.local"
    content     = <<-EOT
      export CONSUL_HTTP_ADDR=unix:///var/run/consul/consul_https.sock
      export NOMAD_ADDR=https://localhost:4646
      export NOMAD_CACERT=/etc/ssl/nomad/ca.pem
      export NOMAD_CLIENT_CERT=/etc/ssl/nomad/cli.pem
      export NOMAD_CLIENT_KEY=/etc/ssl/nomad/cli-key.pem
    EOT
  }

  // reload firewall
  provisioner "remote-exec" {
    connection { host = split("/", self.ipv6)[0] }
    inline = ["service firewalld reload"]
  }

  // set Vault token
  provisioner "remote-exec" {
    connection { host = split("/", self.ipv6)[0] }
    inline = [
      <<-EOC
        SYSTEMD_EDITOR=tee systemctl edit nomad <<EOF
        [Service]
        Environment=VAULT_TOKEN=${var.vault_token}
        EOF
      EOC
      ,
      "chmod 0400 /etc/systemd/system/nomad.service.d/override.conf"
    ]
  }

  // fix permissions
  provisioner "remote-exec" {
    connection { host = split("/", self.ipv6)[0] }
    inline = [
      "chown -R consul:consul /etc/consul.d",
      "chown -R nomad:nomad /etc/nomad.d",
      "chmod +x /usr/local/bin/issue-cert.sh",
      "chmod 0400 /etc/ssl/cfssl.json",
    ]
  }

  // issue certs
  provisioner "remote-exec" {
    connection { host = split("/", self.ipv6)[0] }
    inline = [
      "/usr/local/bin/issue-cert.sh --user consul --ca consul --name consul",
      "/usr/local/bin/issue-cert.sh --user nomad --ca nomad --name nomad --hostnames nomad.service.consul,server.global.nomad,${split("/", self.ipv6)[0]}",
      "/usr/local/bin/issue-cert.sh --user nomad --ca nomad --name cli",
      "/usr/local/bin/issue-cert.sh --user nomad --ca consul --name consul",
      "/usr/local/bin/issue-cert.sh --user nomad --ca vault --name vault",
    ]
  }

  // fix CLI key permissions
  provisioner "remote-exec" {
    connection { host = split("/", self.ipv6)[0] }
    inline = [
      "chmod g+r /etc/ssl/nomad/cli-key.pem",
    ]
  }

  // start services
  provisioner "remote-exec" {
    connection { host = split("/", self.ipv6)[0] }
    inline = [
      "systemctl enable consul && service consul start",
      "systemctl enable nomad && service nomad start",
    ]
    inline = ["systemctl enable consul && service consul start && systemctl enable nomad && service nomad start"]
  }

  // install autocompletion
  // set hostname
  provisioner "remote-exec" {
    connection { host = split("/", self.ipv6)[0] }
    inline = [
      "sudo -u damien /usr/local/bin/consul -autocomplete-install",
      "sudo -u damien /usr/local/bin/nomad -autocomplete-install",
    ]
    inline = ["hostnamectl set-hostname '${self.label}'"]
  }

  // disable further root ssh
  provisioner "remote-exec" {
    connection { host = split("/", self.ipv6)[0] }
    inline = [
      "sed -i 's/PermitRootLogin .*/PermitRootLogin no/' /etc/ssh/sshd_config",
      "service sshd reload",
      "cp /root/.ssh/authorized_keys /home/damien/.ssh/",
      "chown -R damien:users /home/damien/.ssh/",
      // TODO: re-disable root, but only after null resource provisioners have also run
      //"sed -i 's/PermitRootLogin .*/PermitRootLogin no/' /etc/ssh/sshd_config",
      //"service sshd reload",
    ]
  }
}
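
The Vault-token step above leans on SYSTEMD_EDITOR=tee to make systemctl edit non-interactive: tee stands in as the "editor", copying the heredoc from stdin into the drop-in file that systemctl edit creates. A sketch of what ends up on disk (standard systemd drop-in path; token value elided):

    # /etc/systemd/system/nomad.service.d/override.conf
    # systemd merges this into nomad.service, so the token reaches the
    # process environment without appearing in the Nomad config files.
    [Service]
    Environment=VAULT_TOKEN=<value of var.vault_token>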


@@ -222,17 +93,29 @@ resource "random_id" "servers" {
    datacenter     = var.datacenter
    image          = var.image
    instance_type  = var.instance_type
    consul_version = var.consul_version
    nomad_version  = var.nomad_version
  }
  byte_length = 3
  byte_length = 4
}

data "template_file" "cfssl_config" {
  template = file("${path.module}/../../../config/cfssl.json")
  vars = {
    ca_host = var.ca_host
    ca_key  = var.ca_key
resource "null_resource" "cluster" {
  triggers = {
    ips = "${join(",", var.consul_server_ips)}"
  }

  count = length(linode_instance.servers.*.id)
  connection {
    host = split("/", linode_instance.servers[count.index].ipv6)[0]
  }

  provisioner "file" {
    destination = "/etc/consul.d/join.hcl"
    content = <<EOF
retry_join = [${join(", ", [for ip in var.consul_server_ips : format("\"%s\"", ip)])}]
EOF
  }

  provisioner "remote-exec" {
    inline = ["service consul restart"]
  }
}


M terraform/cluster/nomad-server/variables.tf => terraform/cluster/nomad-server/variables.tf +0 -7
@@ -3,15 +3,8 @@ variable servers { type = number }
variable datacenter { type = string }
variable image { type = string }
variable instance_type { type = string }
variable stackscript_id { type = number }
variable authorized_users { type = list(string) }

variable ca_host { type = string }
variable ca_key { type = string }
variable vault_token { type = string }

variable consul_version { type = string }
variable nomad_version { type = string }
variable consul_server_ips { type = list(string) }

// vim: set expandtab shiftwidth=2 tabstop=2:

M terraform/cluster/outputs.tf => terraform/cluster/outputs.tf +2 -2
@@ -1,6 +1,6 @@
output "consul-servers" {
  description = "Consul server instances"
  value       = module.consul-server.instances
  description = "Consul server IP addresses"
  value       = module.consul-server.ips
}

/*

M terraform/cluster/vault-server/main.tf => terraform/cluster/vault-server/main.tf +49 -143
@@ -1,3 +1,12 @@
terraform {
  required_providers {
    linode = {
      source  = "linode/linode"
      version = "~> 1.19.1"
    }
  }
}

resource "linode_instance" "servers" {
  count            = var.servers
  label            = "vault-${random_id.servers[count.index].keepers.datacenter}-${random_id.servers[count.index].hex}"


@@ -6,189 +15,74 @@ resource "linode_instance" "servers" {
  type             = random_id.servers[count.index].keepers.instance_type
  authorized_users = var.authorized_users
  group            = terraform.workspace

  stackscript_id = var.stackscript_id
  stackscript_data = {
    hostname       = "vault-${random_id.servers[count.index].keepers.datacenter}-${random_id.servers[count.index].hex}"
    consul_version = random_id.servers[count.index].keepers.consul_version
    vault_version  = random_id.servers[count.index].keepers.vault_version
  }
  tags            = [terraform.workspace]

  lifecycle {
    create_before_destroy = true
  }

  // wait for stackscript to complete
  provisioner "remote-exec" {
    connection { host = split("/", self.ipv6)[0] }
    inline = [
      "while ! [[ -f /root/StackScript.complete ]]; do echo 'waiting for stackscript to complete...'; sleep 3; done",
    ]
  }

  // systemd service files
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/systemd/system/"
    source      = "../services/"
    inline = ["echo SSH is up"]
  }

  // cfssl config
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/ssl/cfssl.json"
    content     = data.template_file.cfssl_config.rendered
  // Recognize the new server
  // This isn't _ideal_, but it's better than disabling host key checking for every SSH command.
  provisioner "local-exec" {
    command = "ssh-keygen -R '${self.ip_address}' && ssh-keyscan -v '${self.ip_address}' >> ~/.ssh/known_hosts"
  }

  // consul config
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/consul.d"
    source      = "../config/consul"
  provisioner "local-exec" {
    command = "../ca/provision-cert --addr ${self.ip_address} --ca consul-agent --cn client.${self.region}.consul --owner consul:consul --outdir /etc/ssl/consul-agent --basename consul"
  }

  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/consul.d/client.hcl"
    content     = <<-EOT
      node_name        = "${self.label}"
      datacenter       = "${var.datacenter}"
      server           = false
      retry_join       = [
        %{for ip in var.consul_server_ips~}
          "${split("/", ip)[0]}",
        %{endfor~}
      ]
    EOT
  }

  // vault config
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/vault.d"
    source      = "../config/vault"
  provisioner "local-exec" {
    command = "../ca/provision-cert --addr ${self.ip_address} --ca vault-server --cn server.${self.region}.vault --owner vault:vault --outdir /etc/ssl/vault-server --basename vault"
  }

  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/vault.d/server.hcl"
    content     = <<-EOT
      datacenter = "${var.datacenter}"

      datacenter       = "${var.datacenter}"
      server {
        enabled = true
        enabled          = true
        bootstrap_expect = ${var.servers}
      }
    EOT
  }

  // firewall services
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/firewalld/services"
    source      = "../firewall/services/"
  }

  // firewall zones
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/firewalld/zones"
    source      = "../firewall/zones/"
  }

  // issue-cert script
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/usr/local/bin/issue-cert.sh"
    source      = "../scripts/issue-cert.sh"
  }

  // Consul certificate authority
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/ssl/consul/ca.pem"
    source      = "/etc/ssl/consul/ca.pem"
  }

  // Vault certificate authority
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/ssl/vault/ca.pem"
    source      = "/etc/ssl/vault/ca.pem"
  }

  // set global environment variables
  provisioner "file" {
    connection { host = split("/", self.ipv6)[0] }
    destination = "/etc/profile.local"
    content     = <<-EOT
      export CONSUL_HTTP_ADDR=unix:///var/run/consul/consul_https.sock
      export VAULT_ADDR=https://localhost:8200
      export VAULT_CACERT=/etc/ssl/vault/ca.pem
      export VAULT_CLIENT_CERT=/etc/ssl/vault/cli.pem
      export VAULT_CLIENT_KEY=/etc/ssl/vault/cli-key.pem
    EOT
  }

  // reload firewall
  provisioner "remote-exec" {
    connection { host = split("/", self.ipv6)[0] }
    inline = ["service firewalld reload"]
  }

  // fix permissions
  provisioner "remote-exec" {
    connection { host = split("/", self.ipv6)[0] }
    inline = [
      "chown -R consul:consul /etc/consul.d",
      "chown -R vault:vault /etc/vault.d",
      "chmod +x /usr/local/bin/issue-cert.sh",
      "chmod 0400 /etc/ssl/cfssl.json",
    ]
  }

  // issue certs
  provisioner "remote-exec" {
    connection { host = split("/", self.ipv6)[0] }
    inline = [
      "/usr/local/bin/issue-cert.sh --user consul --ca consul --name consul",
      "/usr/local/bin/issue-cert.sh --user vault --ca consul --name consul",
      "/usr/local/bin/issue-cert.sh --user vault --ca vault --name vault --hostnames vault.service.consul,active.vault.service.consul,${split("/", self.ipv6)[0]}",
      "/usr/local/bin/issue-cert.sh --user vault --ca vault --name cli",
    ]
  }

  // fix CLI key permissions
  provisioner "remote-exec" {
    connection { host = split("/", self.ipv6)[0] }
    inline = [
      "chmod g+r /etc/ssl/vault/cli-key.pem",
    ]
  }

  // start services
  provisioner "remote-exec" {
    connection { host = split("/", self.ipv6)[0] }
    inline = [
      "systemctl enable consul && service consul start",
      "systemctl enable vault && service vault start",
    ]
    inline = ["systemctl enable consul && service consul start && systemctl enable vault && service vault start"]
  }

  // install autocompletion
  // set hostname
  provisioner "remote-exec" {
    connection { host = split("/", self.ipv6)[0] }
    inline = [
      "sudo -u damien /usr/local/bin/consul -autocomplete-install",
      "sudo -u damien /usr/local/bin/vault -autocomplete-install",
    ]
    inline = ["hostnamectl set-hostname '${self.label}'"]
  }

  // disable further root ssh
  provisioner "remote-exec" {
    connection { host = split("/", self.ipv6)[0] }
    inline = [
      "sed -i 's/PermitRootLogin .*/PermitRootLogin no/' /etc/ssh/sshd_config",
      "service sshd reload",
      "cp /root/.ssh/authorized_keys /home/damien/.ssh/",
      "chown -R damien:users /home/damien/.ssh/",
      // TODO: re-disable root, but only after null resource provisioners have also run
      //"sed -i 's/PermitRootLogin .*/PermitRootLogin no/' /etc/ssh/sshd_config",
      //"service sshd reload",
    ]
  }
}


@@ -199,17 +93,29 @@ resource "random_id" "servers" {
    datacenter     = var.datacenter
    image          = var.image
    instance_type  = var.instance_type
    consul_version = var.consul_version
    vault_version  = var.vault_version
  }
  byte_length = 4
}

data "template_file" "cfssl_config" {
  template = file("${path.module}/../../../config/cfssl.json")
  vars = {
    ca_host = var.ca_host
    ca_key  = var.ca_key
resource "null_resource" "cluster" {
  triggers = {
    ips = "${join(",", var.consul_server_ips)}"
  }

  count = length(linode_instance.servers.*.id)
  connection {
    host = split("/", linode_instance.servers[count.index].ipv6)[0]
  }

  provisioner "file" {
    destination = "/etc/consul.d/join.hcl"
    content = <<EOF
retry_join = [${join(", ", [for ip in var.consul_server_ips : format("\"%s\"", ip)])}]
EOF
  }

  provisioner "remote-exec" {
    inline = ["service consul restart"]
  }
}


M terraform/cluster/vault-server/variables.tf => terraform/cluster/vault-server/variables.tf +0 -7
@@ -3,14 +3,7 @@ variable servers { type = number }
variable datacenter { type = string }
variable image { type = string }
variable instance_type { type = string }
variable stackscript_id { type = number }
variable authorized_users { type = list(string) }

variable ca_host { type = string }
variable ca_key { type = string }

variable consul_version { type = string }
variable vault_version { type = string }
variable consul_server_ips { type = list(string) }

// vim: set expandtab shiftwidth=2 tabstop=2:

M terraform/main.tf => terraform/main.tf +5 -1
@@ -2,7 +2,7 @@ terraform {
  required_providers {
    linode = {
      source  = "linode/linode"
      version = "1.19.1"
      version = "~> 1.19.1"
    }
  }
}


@@ -23,3 +23,7 @@ module "cluster" {
  instance_type    = "g6-nanode-1"
  vault_token      = "root_token"
}

output "consul-servers" {
  value = module.cluster.consul-servers
}

A terraform/versions.tf => terraform/versions.tf +3 -0
@@ -0,0 +1,3 @@
terraform {
  required_version = ">= 0.13"
}
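
The version floor matches the provider blocks elsewhere in the tree: required_providers entries with a source attribute are Terraform 0.13+ syntax, so an older CLI fails fast here with a clear version error instead of a parse error. An annotated form:

    terraform {
      // ">= 0.13" because the required_providers "source" syntax used in
      // terraform/main.tf and the cluster modules first appeared in 0.13.
      required_version = ">= 0.13"
    }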