instances.tf
# Our master instance with everything we need to use Ansible
resource "aws_instance" "master" {
  ami                         = "ami-0b59bfac6be064b78"
  instance_type               = "${var.instance_type}"
  vpc_security_group_ids      = ["${aws_security_group.public.id}"]
  key_name                    = "Default SSH"
  subnet_id                   = "${aws_subnet.default.id}"
  associate_public_ip_address = true

  # Use our local ssh key to connect to the master
  connection {
    type        = "ssh"
    user        = "ec2-user"
    private_key = "${file("~/.ssh/id_rsa")}"
  }

  # Create a new tar archive each time in case we changed something
  provisioner "local-exec" {
    command = "tar zcf /tmp/hadoop-configs.tar.gz hadoop"
  }

  # This is the Ansible playbook that configures Hadoop on the master and
  # workers
  provisioner "file" {
    source      = "setup_hadoop.yml"
    destination = "setup_hadoop.yml"
  }

  # These are all the Hadoop config files we use (except etc/hadoop/workers,
  # which is generated in Terraform)
  provisioner "file" {
    source      = "/tmp/hadoop-configs.tar.gz"
    destination = "hadoop-configs.tar.gz"
  }

  # Get the master able to run Ansible, then configure the whole cluster
  provisioner "remote-exec" {
    inline = [
      # Install Ansible on the master
      "sudo yum-config-manager --enable epel",
      "sudo yum -y install ansible",
      # Keep Ansible from verifying the identity of our workers
      "sudo sh -c 'sed -i.bak s/#host_key_checking/host_key_checking/ /etc/ansible/ansible.cfg'",
      # Put the internal key we created on the master and make sure we can
      # connect to ourselves
      "echo \"${tls_private_key.internal_key.public_key_openssh}\" | tee -a ~/.ssh/authorized_keys > ~/.ssh/id_rsa.pub",
      "echo \"${tls_private_key.internal_key.private_key_pem}\" > ~/.ssh/id_rsa",
      "chmod 600 ~/.ssh/id_rsa",
      # Set up our /etc/hosts on the master
      "sudo sh -c 'echo \"${self.private_ip} master\" >> /etc/hosts'",
      "sudo sh -c 'echo \"${join("\n", data.template_file.worker_hosts.*.rendered)}\" >> /etc/hosts'",
      # Set up our /etc/ansible/hosts on the master
      "sudo sh -c 'echo \"${data.template_file.ansible_hosts.rendered}\" > /etc/ansible/hosts'",
      # Stage the generated etc/hadoop/workers file on the master as ~/workers
      "echo \"${data.template_file.hadoop_workers.rendered}\" > ~/workers",
      # Run our playbook
      "ansible-playbook setup_hadoop.yml",
    ]
  }

  tags {
    Name = "Hadoop Master"
  }
}
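
# The remote-exec above assumes three template_file data sources
# (worker_hosts, ansible_hosts, hadoop_workers) defined elsewhere in this
# configuration. A minimal sketch of what worker_hosts might look like,
# commented out so it doesn't clash with the repo's real definition; the
# template string is an illustrative guess:
#
# data "template_file" "worker_hosts" {
#   count    = "${var.number_of_workers}"
#   template = "$${ip} worker$${index}"
#
#   vars {
#     ip    = "${element(aws_instance.worker.*.private_ip, count.index)}"
#     index = "${count.index + 1}"
#   }
# }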
# Our worker instance with an internal key
resource "aws_instance" "worker" {
  count                       = "${var.number_of_workers}"
  ami                         = "ami-0b59bfac6be064b78"
  instance_type               = "${var.instance_type}"
  vpc_security_group_ids      = ["${aws_security_group.private.id}"]
  key_name                    = "${aws_key_pair.internal_auth.key_name}"
  subnet_id                   = "${aws_subnet.default.id}"
  associate_public_ip_address = true

  tags {
    Name = "Hadoop Worker ${count.index + 1}"
  }
}
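
# This file also relies on an internal key pair created elsewhere in the
# configuration (referenced above as tls_private_key.internal_key and
# aws_key_pair.internal_auth). A minimal sketch of how they could be defined,
# commented out to avoid duplicating the repo's real definitions; the
# key_name and rsa_bits values here are assumptions:
#
# resource "tls_private_key" "internal_key" {
#   algorithm = "RSA"
#   rsa_bits  = 4096
# }
#
# resource "aws_key_pair" "internal_auth" {
#   key_name   = "internal-auth"
#   public_key = "${tls_private_key.internal_key.public_key_openssh}"
# }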