feat(terraform): add hosts for JAMStack sites #598

Merged · 1 commit · Sep 4, 2023
10 changes: 10 additions & 0 deletions terraform/prd-cluster-oldeworld/main.tf
@@ -22,9 +22,19 @@ data "hcp_packer_image" "linode_ubuntu" {
region = "us-east"
}

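# Look up the existing standalone backoffice instance by label; its private IP
# is allowed through the cluster firewall (see next-10-firewall.tf) for Docker
# Swarm management.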
data "linode_instances" "ops_standalone_backoffice" {
filter {
name = "label"
values = [
"ops-vm-backoffice",
]
}
}

locals {
pxy_node_count = 3 # number of proxy nodes
api_node_count = 3 # number of api nodes
jms_node_count = 3 # number of JAMStack nodes
clt_node_count = 2 # number of client nodes for EACH LANGUAGE!
}

128 changes: 128 additions & 0 deletions terraform/prd-cluster-oldeworld/next-06-nws-jms.tf
@@ -0,0 +1,128 @@
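# JAMStack (jms) hosts for the prd Oldeworld cluster: the Linode instances,
# their boot disks and instance configs, and the Akamai DNS records for each node.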
resource "linode_instance" "prd_oldeworld_jms" {
count = local.jms_node_count
label = "prd-vm-oldeworld-jms-${count.index + 1}"

region = var.region
type = "g6-standard-2"
private_ip = true
watchdog_enabled = true

# NOTE:
# Value should use '_' as the separator for compatibility with Ansible Dynamic Inventory.
tags = ["prd", "oldeworld", "jms"]

# WARNING:
# Do not change this value; changing it will delete and recreate all instances in the group.
# NOTE:
# Value should use '_' as the separator for compatibility with Ansible Dynamic Inventory.
group = "prd_oldeworld_jms"
}

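# Boot disk for each JMS node, created from the HCP Packer Ubuntu image and
# seeded with cloud-init user data via the cloud-init StackScript.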
resource "linode_instance_disk" "prd_oldeworld_jms_disk__boot" {
count = local.jms_node_count
label = "prd-vm-oldeworld-jms-${count.index + 1}-boot"
linode_id = linode_instance.prd_oldeworld_jms[count.index].id
size = linode_instance.prd_oldeworld_jms[count.index].specs.0.disk

image = data.hcp_packer_image.linode_ubuntu.cloud_image_id
root_pass = var.password

stackscript_id = data.linode_stackscripts.cloudinit_scripts.stackscripts.0.id
stackscript_data = {
userdata = base64encode(
templatefile("${path.root}/cloud-init--userdata.yml.tftpl", {
tf_hostname = "jms-${count.index + 1}.oldeworld.prd.${local.zone}"
})
)
}
}

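# Instance configuration for each JMS node: attaches the boot disk, exposes
# eth0 as the public interface and eth1 as the VLAN interface, then waits for
# cloud-init to finish before scheduling a reboot to apply the hostname.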
resource "linode_instance_config" "prd_oldeworld_jms_config" {
count = local.jms_node_count
label = "prd-vm-oldeworld-jms-config"
linode_id = linode_instance.prd_oldeworld_jms[count.index].id

device {
device_name = "sda"
disk_id = linode_instance_disk.prd_oldeworld_jms_disk__boot[count.index].id
}

# eth0 is the public interface.
interface {
purpose = "public"
}

# eth1 is the private interface.
interface {
purpose = "vlan"
label = "prd-oldeworld-vlan"
# Request the host IP for the machine
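# cidrhost() returns the Nth host address in 10.0.0.0/8, so each node gets a
# unique VLAN address offset by local.ipam_block_jms (defined elsewhere in the cluster).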
ipam_address = "${cidrhost("10.0.0.0/8", local.ipam_block_jms + count.index)}/24"
}

connection {
type = "ssh"
user = "root"
password = var.password
host = linode_instance.prd_oldeworld_jms[count.index].ip_address
}

# All of the provisioning should be done via cloud-init.
# This step just sets up the reboot.
provisioner "remote-exec" {
inline = [
# Wait for cloud-init to finish.
"while [ ! -f /var/lib/cloud/instance/boot-finished ]; do echo 'Waiting for cloud-init...'; sleep 1; done",
"echo Current hostname...; hostname",
"shutdown -r +1 'Terraform: Rebooting to apply hostname change in 1 min.'"
]
}

# This run is a hack to trigger the reboot, which may otherwise fail
# in the previous step.
provisioner "remote-exec" {
inline = [
"uptime"
]
}

helpers {
updatedb_disabled = true
}

kernel = "linode/grub2"
booted = true
}

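# Akamai DNS A records for each node: one on the VLAN address, plus "pub." and
# "prv." records for the public and private Linode addresses.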
resource "akamai_dns_record" "prd_oldeworld_jms_dnsrecord__vlan" {
count = local.jms_node_count

zone = local.zone
recordtype = "A"
ttl = 120

name = "jms-${count.index + 1}.oldeworld.prd.${local.zone}"
target = [trimsuffix(linode_instance_config.prd_oldeworld_jms_config[count.index].interface[1].ipam_address, "/24")]
}

resource "akamai_dns_record" "prd_oldeworld_jms_dnsrecord__public" {
count = local.jms_node_count

zone = local.zone
recordtype = "A"
ttl = 120

name = "pub.jms-${count.index + 1}.oldeworld.prd.${var.network_subdomain}.${local.zone}"
target = [linode_instance.prd_oldeworld_jms[count.index].ip_address]
}

resource "akamai_dns_record" "prd_oldeworld_jms_dnsrecord__private" {
count = local.jms_node_count

zone = local.zone
recordtype = "A"
ttl = 120

name = "prv.jms-${count.index + 1}.oldeworld.prd.${local.zone}"
target = [linode_instance.prd_oldeworld_jms[count.index].private_ip_address]
}
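# For illustration (assuming local.zone and var.network_subdomain resolve as in
# the rest of the cluster), node 1 ends up with records along the lines of:
#   jms-1.oldeworld.prd.<zone>                          -> VLAN address
#   pub.jms-1.oldeworld.prd.<network_subdomain>.<zone>  -> public IP
#   prv.jms-1.oldeworld.prd.<zone>                      -> private IP
# and likewise for jms-2 and jms-3.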
26 changes: 26 additions & 0 deletions terraform/prd-cluster-oldeworld/next-10-firewall.tf
@@ -57,6 +57,29 @@ resource "linode_firewall" "prd_oldeworld_firewall" {
ipv6 = ["::/0"]
}

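# Allow all TCP traffic originating on the 10.0.0.0/8 VLAN.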
inbound {
label = "allow-all-tcp_from-vlan"
ports = "1-65535"
protocol = "TCP"
action = "ACCEPT"
ipv4 = flatten([
["10.0.0.0/8"]
])
}

inbound {
label = "allow-all-tcp-from-private-ip"
ports = "1-65535"
protocol = "TCP"
action = "ACCEPT"
ipv4 = flatten([
// Allow all ports from the backoffice instance private IP. Used for Docker Swarm management.
["${data.linode_instances.ops_standalone_backoffice.instances[0].private_ip_address}/32"],

// Allow all ports from the private IP within the instance group. Used for Docker Swarm management.
[for i in linode_instance.prd_oldeworld_jms : "${i.private_ip_address}/32"],
])
}
# outbound { }

inbound_policy = "DROP"
@@ -71,5 +94,8 @@ resource "linode_firewall" "prd_oldeworld_firewall" {

# All News Nodes.
[for i in linode_instance.prd_oldeworld_nws : i.id],

# All JMS Nodes.
[for i in linode_instance.prd_oldeworld_jms : i.id],
])
}