diff --git a/terraform/prd-cluster-oldeworld/main.tf b/terraform/prd-cluster-oldeworld/main.tf index f1fe8db8..5c675db3 100644 --- a/terraform/prd-cluster-oldeworld/main.tf +++ b/terraform/prd-cluster-oldeworld/main.tf @@ -22,9 +22,19 @@ data "hcp_packer_image" "linode_ubuntu" { region = "us-east" } +data "linode_instances" "ops_standalone_backoffice" { + filter { + name = "label" + values = [ + "ops-vm-backoffice", + ] + } +} + locals { pxy_node_count = 3 # number of proxy nodes api_node_count = 3 # number of api nodes + jms_node_count = 3 # number of JAMStack nodes clt_node_count = 2 # number of client nodes for EACH LANGUAGE! } diff --git a/terraform/prd-cluster-oldeworld/next-06-nws-jms.tf b/terraform/prd-cluster-oldeworld/next-06-nws-jms.tf new file mode 100644 index 00000000..f8a7e769 --- /dev/null +++ b/terraform/prd-cluster-oldeworld/next-06-nws-jms.tf @@ -0,0 +1,128 @@ +resource "linode_instance" "prd_oldeworld_jms" { + count = local.jms_node_count + label = "prd-vm-oldeworld-jms-${count.index + 1}" + + region = var.region + type = "g6-standard-2" + private_ip = true + watchdog_enabled = true + + # NOTE: + # Value should use '_' as separator for compatibility with Ansible Dynamic Inventory + tags = ["prd", "oldeworld", "jms"] + + # WARNING: + # Do not change, will delete and recreate all instances in the group + # NOTE: + # Value should use '_' as separator for compatibility with Ansible Dynamic Inventory + group = "prd_oldeworld_jms" +} + +resource "linode_instance_disk" "prd_oldeworld_jms_disk__boot" { + count = local.jms_node_count + label = "prd-vm-oldeworld-jms-${count.index + 1}-boot" + linode_id = linode_instance.prd_oldeworld_jms[count.index].id + size = linode_instance.prd_oldeworld_jms[count.index].specs.0.disk + + image = data.hcp_packer_image.linode_ubuntu.cloud_image_id + root_pass = var.password + + stackscript_id = data.linode_stackscripts.cloudinit_scripts.stackscripts.0.id + stackscript_data = { + userdata = base64encode( + 
templatefile("${path.root}/cloud-init--userdata.yml.tftpl", { + tf_hostname = "jms-${count.index + 1}.oldeworld.prd.${local.zone}" + }) + ) + } +} + +resource "linode_instance_config" "prd_oldeworld_jms_config" { + count = local.jms_node_count + label = "prd-vm-oldeworld-jms-config" + linode_id = linode_instance.prd_oldeworld_jms[count.index].id + + device { + device_name = "sda" + disk_id = linode_instance_disk.prd_oldeworld_jms_disk__boot[count.index].id + } + + # eth0 is the public interface. + interface { + purpose = "public" + } + + # eth1 is the private interface. + interface { + purpose = "vlan" + label = "prd-oldeworld-vlan" + # Request the host IP for the machine + ipam_address = "${cidrhost("10.0.0.0/8", local.ipam_block_jms + count.index)}/24" + } + + connection { + type = "ssh" + user = "root" + password = var.password + host = linode_instance.prd_oldeworld_jms[count.index].ip_address + } + + # All of the provisioning should be done via cloud-init. + # This is just to setup the reboot. + provisioner "remote-exec" { + inline = [ + # Wait for cloud-init to finish. + "while [ ! -f /var/lib/cloud/instance/boot-finished ]; do echo 'Waiting for cloud-init...'; sleep 1; done", + "echo Current hostname...; hostname", + "shutdown -r +1 'Terraform: Rebooting to apply hostname change in 1 min.'" + ] + } + + # This run is a hack to trigger the reboot, + # which may fail otherwise in the previous step. 
+ provisioner "remote-exec" { + inline = [ + "uptime" + ] + } + + helpers { + updatedb_disabled = true + } + + kernel = "linode/grub2" + booted = true +} + +resource "akamai_dns_record" "prd_oldeworld_jms_dnsrecord__vlan" { + count = local.jms_node_count + + zone = local.zone + recordtype = "A" + ttl = 120 + + name = "jms-${count.index + 1}.oldeworld.prd.${local.zone}" + target = [trimsuffix(linode_instance_config.prd_oldeworld_jms_config[count.index].interface[1].ipam_address, "/24")] +} + +resource "akamai_dns_record" "prd_oldeworld_jms_dnsrecord__public" { + count = local.jms_node_count + + zone = local.zone + recordtype = "A" + ttl = 120 + + name = "pub.jms-${count.index + 1}.oldeworld.prd.${var.network_subdomain}.${local.zone}" + target = [linode_instance.prd_oldeworld_jms[count.index].ip_address] +} + +resource "akamai_dns_record" "prd_oldeworld_jms_dnsrecord__private" { + count = local.jms_node_count + + zone = local.zone + recordtype = "A" + ttl = 120 + + name = "prv.jms-${count.index + 1}.oldeworld.prd.${local.zone}" + target = [linode_instance.prd_oldeworld_jms[count.index].private_ip_address] +} diff --git a/terraform/prd-cluster-oldeworld/next-10-firewall.tf b/terraform/prd-cluster-oldeworld/next-10-firewall.tf index e534f3f2..65f00184 100644 --- a/terraform/prd-cluster-oldeworld/next-10-firewall.tf +++ b/terraform/prd-cluster-oldeworld/next-10-firewall.tf @@ -57,6 +57,29 @@ resource "linode_firewall" "prd_oldeworld_firewall" { ipv6 = ["::/0"] } + inbound { + label = "allow-all-tcp_from-vlan" + ports = "1-65535" + protocol = "TCP" + action = "ACCEPT" + ipv4 = flatten([ + ["10.0.0.0/8"] + ]) + } + + inbound { + label = "allow-all-tcp-from-private-ip" + ports = "1-65535" + protocol = "TCP" + action = "ACCEPT" + ipv4 = flatten([ + // Allow all ports from the backoffice instance private IP. Used for Docker Swarm management. 
+ ["${data.linode_instances.ops_standalone_backoffice.instances[0].private_ip_address}/32"], + + // Allow all ports from the private IP within the instance group. Used for Docker Swarm management. + [for i in linode_instance.prd_oldeworld_jms : "${i.private_ip_address}/32"], + ]) + } # outbound { } inbound_policy = "DROP" @@ -71,5 +94,8 @@ resource "linode_firewall" "prd_oldeworld_firewall" { # All News Nodes. [for i in linode_instance.prd_oldeworld_nws : i.id], + + # All JMS Nodes. + [for i in linode_instance.prd_oldeworld_jms : i.id], ]) }