AWS + Terraform: Launching Infrastructure

Neeteesh Yadav
10 min readJun 17, 2020

--

In this article we learn how to use Terraform to launch an AWS instance.

AWS:-

Amazon Web Services (AWS) is a subsidiary of Amazon that provides on-demand cloud computing platforms and APIs to individuals, companies, and governments, on a metered pay-as-you-go basis.

Terraform:-

Terraform is a tool for building, changing, and versioning infrastructure safely and efficiently. Terraform can manage existing and popular service providers as well as custom in-house solutions. Terraform can launch the whole infrastructure of any cloud platform, such as AWS, Microsoft Azure, etc.

Terraform uses the HashiCorp Configuration Language (HCL), or JSON, as the scripting language for writing the code that builds the infrastructure.

Description or Details of use case, what we do in this infrastructure….

step1:- Create the key and security group which allow the port 80.
step2:- Launch EC2 instance.
step3:- In this Ec2 instance use the key and security group which we have created in step 1.
step4:- Launch one Volume (EBS) and mount that volume into /var/www/html
step5:- The developer has uploaded the code to a GitHub repo; the repo also contains some images.
step6:-. Copy the github repo code into /var/www/html
step7:-. Create S3 bucket, and copy/deploy the images from github repo into the s3 bucket and change the permission to public readable.
step8:- Create a Cloudfront using s3 bucket(which contains images) and use the Cloudfront URL to update in code in /var/www/html

Using Terraform to launch the infrastructure:

// Provider: authenticate via the local AWS CLI profile "Neetesh" and
// deploy all resources to the Mumbai (ap-south-1) region.
provider "aws" {
  region  = "ap-south-1"
  profile = "Neetesh"
}

// Generate an RSA private key locally; its public half is registered
// with AWS as an EC2 key pair, and the PEM is saved for manual SSH.
resource "tls_private_key" "privatekey" {
  algorithm = "RSA"
  rsa_bits  = 4096 # explicit key size; provider default is 2048
}

// Register the generated public key with AWS as an EC2 key pair.
// Referencing tls_private_key.privatekey already creates the ordering
// dependency, so an explicit depends_on is unnecessary.
resource "aws_key_pair" "keypair" {
  key_name   = "terraformkey"
  public_key = tls_private_key.privatekey.public_key_openssh
}

// Save the private key to disk so we can SSH into the instance
// manually. Restrict permissions: ssh refuses world-readable keys.
resource "local_file" "key" {
  content         = tls_private_key.privatekey.private_key_pem
  filename        = "terraformkey.pem"
  file_permission = "0400"
  depends_on      = [aws_key_pair.keypair]
}
// Security group allowing inbound SSH (22) and HTTP (80) from
// anywhere, plus all outbound traffic.
resource "aws_security_group" "sg_gp" {
  name        = "sg_gp"
  description = "Allow SSH and HTTP"
  vpc_id      = "vpc-fba9b493"

  ingress {
    description = "SSH"
    from_port   = 22
    to_port     = 22
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    description = "HTTP"
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1" # all protocols
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = {
    Name = "sg_gp"
  }
}
// EC2 instance that will run the web server.
resource "aws_instance" "web" {
  ami           = "ami-0447a12f28fddb066" # presumably Amazon Linux 2 in ap-south-1 (yum/ec2-user below)
  instance_type = "t2.micro"
  key_name      = aws_key_pair.keypair.key_name

  // security_groups accepts names only in EC2-Classic/default VPC; the
  // group above is created in an explicit VPC, so attach it by ID.
  vpc_security_group_ids = [aws_security_group.sg_gp.id]

  // SSH connection used by the provisioner below. Use self.public_ip:
  // a resource cannot reference its own address (it would be a cycle).
  connection {
    type        = "ssh"
    user        = "ec2-user"
    private_key = tls_private_key.privatekey.private_key_pem
    host        = self.public_ip
  }

  // Install and enable Apache httpd, PHP and git on first boot.
  provisioner "remote-exec" {
    inline = [
      "sudo yum install httpd php git -y",
      "sudo systemctl restart httpd",
      "sudo systemctl enable httpd",
    ]
  }

  tags = {
    Name = "LINUX"
  }
}
// 1 GiB EBS volume, placed in the instance's AZ so it can be attached.
resource "aws_ebs_volume" "esb1" {
  availability_zone = aws_instance.web.availability_zone
  size              = 1

  tags = {
    Name = "mypd"
  }
}

// Attach the volume to the instance. The volume_id / instance_id
// references already order this after both resources exist.
resource "aws_volume_attachment" "ebs_att" {
  device_name  = "/dev/sdh" # exposed as /dev/xvdh inside the guest
  volume_id    = aws_ebs_volume.esb1.id
  instance_id  = aws_instance.web.id
  force_detach = true
}
// Format and mount the attached volume on the web root, then pull the
// site code from GitHub into it.
resource "null_resource" "remote2" {
  depends_on = [aws_volume_attachment.ebs_att]

  connection {
    type        = "ssh"
    user        = "ec2-user"
    private_key = tls_private_key.privatekey.private_key_pem
    host        = aws_instance.web.public_ip
  }

  provisioner "remote-exec" {
    inline = [
      "sudo mkfs.ext4 /dev/xvdh",
      "sudo mount /dev/xvdh /var/www/html",
      "sudo rm -rf /var/www/html/*",
      "sudo git clone https://github.com/imneeteeshyadav98/hmc_t1.git /var/www/html/",
    ]
  }
}
// S3 bucket holding the static images (name must be globally unique).
// NOTE(review): public-read is intentional here — the images are
// served publicly through CloudFront below.
resource "aws_s3_bucket" "neeteshbucket1234" {
  bucket = "neeteshbucket1234"
  acl    = "public-read"

  versioning {
    enabled = true
  }
}
// Upload the image to the bucket; the etag (file MD5) triggers a
// re-upload whenever the local file changes.
resource "aws_s3_bucket_object" "fileupload" {
  key    = "static_images"
  bucket = aws_s3_bucket.neeteshbucket1234.id
  acl    = "public-read"
  source = "1.png"
  etag   = filemd5("1.png")
}
// Shared origin identifier used by the CloudFront cache behaviors.
locals {
  s3_origin_id = "myS3Origin"
}

// CloudFront distribution fronting the S3 bucket. After creation, the
// distribution's domain name is injected into the site's index.php on
// the EC2 instance so the page serves the image via the CDN.
resource "aws_cloudfront_distribution" "s3_distribution" {
  origin {
    domain_name = aws_s3_bucket.neeteshbucket1234.bucket_regional_domain_name
    origin_id   = local.s3_origin_id
  }

  enabled             = true
  is_ipv6_enabled     = true
  comment             = "Images"
  default_root_object = "static_images"

  default_cache_behavior {
    allowed_methods  = ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"]
    cached_methods   = ["GET", "HEAD"]
    target_origin_id = local.s3_origin_id

    forwarded_values {
      query_string = false

      cookies {
        forward = "none"
      }
    }

    viewer_protocol_policy = "allow-all"
    min_ttl                = 0
    default_ttl            = 3600
    max_ttl                = 86400
  }

  // Cache behavior with precedence 0: long-lived immutable content.
  ordered_cache_behavior {
    path_pattern     = "/content/immutable/*"
    allowed_methods  = ["GET", "HEAD", "OPTIONS"]
    cached_methods   = ["GET", "HEAD", "OPTIONS"]
    target_origin_id = local.s3_origin_id

    forwarded_values {
      query_string = false
      headers      = ["Origin"]

      cookies {
        forward = "none"
      }
    }

    min_ttl                = 0
    default_ttl            = 86400
    max_ttl                = 31536000
    compress               = true
    viewer_protocol_policy = "redirect-to-https"
  }

  // Cache behavior with precedence 1.
  ordered_cache_behavior {
    path_pattern     = "/content/*"
    allowed_methods  = ["GET", "HEAD", "OPTIONS"]
    cached_methods   = ["GET", "HEAD"]
    target_origin_id = local.s3_origin_id

    forwarded_values {
      query_string = false

      cookies {
        forward = "none"
      }
    }

    min_ttl                = 0
    default_ttl            = 3600
    max_ttl                = 86400
    compress               = true
    viewer_protocol_policy = "redirect-to-https"
  }

  price_class = "PriceClass_200"

  restrictions {
    geo_restriction {
      restriction_type = "whitelist"
      locations        = ["IN"]
    }
  }

  tags = {
    Environment = "production"
  }

  viewer_certificate {
    cloudfront_default_certificate = true
  }

  // Append an <img> tag pointing at this distribution's domain to the
  // site's index.php. self.domain_name is this distribution's domain.
  connection {
    type        = "ssh"
    user        = "ec2-user"
    private_key = tls_private_key.privatekey.private_key_pem
    host        = aws_instance.web.public_ip
  }

  provisioner "remote-exec" {
    inline = [
      "sudo su << EOF",
      "echo \"<center><img src='http://${self.domain_name}/${aws_s3_bucket_object.fileupload.key}' height='100px' width='200px'></center>\" >> /var/www/html/index.php",
      "EOF",
    ]
  }
}

// Once the site and CDN are deployed, open the site in a local
// Firefox browser as a smoke test.
resource "null_resource" "nulllocal1" {
  depends_on = [
    null_resource.remote2,
    aws_cloudfront_distribution.s3_distribution,
  ]

  provisioner "local-exec" {
    command = "firefox ${aws_instance.web.public_ip}"
  }
}

Explanation of code and run the code and find the output:-

Step1:- Configure the AWS

In this part we create secret key and access key to configure AWS.

provider "aws" {
  region  = "ap-south-1"
  profile = "Neetesh"
}

provider:-

A provider is responsible for understanding API interactions and exposing resources. Providers generally are an IaaS (e.g. Alibaba Cloud, AWS, GCP, Microsoft Azure, OpenStack), PaaS (e.g. Heroku), or SaaS services (e.g. Terraform Cloud, DNSimple, Cloudflare).

region:-

Region specifies the data center where the developer launches the infrastructure; in this particular example we use the Mumbai data center.

profile:-

Profile specifies the locally configured AWS credentials to use for logging in to the AWS platform.

Step2:- Create Key pairs

A key pair is used to control login access to EC2 instances.

resource "tls_private_key" "privatekey" {
  algorithm = "RSA"
}

resource "aws_key_pair" "keypair" {
  key_name   = "terraformkey"
  public_key = tls_private_key.privatekey.public_key_openssh
}

resource "local_file" "key" {
  content         = tls_private_key.privatekey.private_key_pem
  filename        = "terraformkey.pem"
  file_permission = "0400" # keep the private key readable only by the owner
  depends_on      = [aws_key_pair.keypair]
}

resource:-

Resources are the most important element in the Terraform language. Each resource block describes one or more infrastructure objects, such as virtual networks, compute instances, or higher-level components such as DNS records.

tls_private_key:-

Generates a secure private key and encodes it as PEM. This resource is primarily intended for easily bootstrapping throwaway development environments.

privatekey:-

Give a resource name further use anywhere in the programs.

algorithm:- (Required) The name of the algorithm to use for the key. Currently-supported values are "RSA" and "ECDSA".

aws_key_pair:-

A key pair is used to control login access to EC2 instances.

key_name - (Optional) The name for the key pair.

public_key - (Required) The public key material.

local_file:-

Generates a local file with the given content.

content - (Optional) The content of file to create. Conflicts with sensitive_content and content_base64.

filename - (Required) The path of the file to create.

Step:-Create Security group

Terraform currently provides both a standalone Security Group Rule resource (a single ingress or egress rule), and a Security Group resource with ingress and egress rules defined in-line.

resource "aws_security_group" "sg_gp" {
  name        = "sg_gp"
  description = "Allow SSH and HTTP"
  vpc_id      = "vpc-fba9b493"

  ingress {
    description = "SSH"
    from_port   = 22
    to_port     = 22
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    description = "HTTP"
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = {
    Name = "sg_gp"
  }
}

name - (Optional, Forces new resource) The name of the security group. If omitted, Terraform will assign a random, unique name.

ingress - (Optional) Can be specified multiple times for each ingress rule. Each ingress block supports fields documented below. This argument is processed in attribute-as-blocks mode.

egress - (Optional, VPC only) Can be specified multiple times for each egress rule. Each egress block supports fields documented below. This argument is processed in attribute-as-blocks mode.

Step:- Launch Instances

This allows instances to be created, updated, and deleted. Instances also support provisioning.

resource “aws_instance” “web” {
ami = “ami-0447a12f28fddb066”
instance_type = “t2.micro”
key_name=”terraformkey”
security_groups=[“sg_gp”]
//connect the operating system
connection{
type=”ssh”
user= “ec2-user”
private_key =”${tls_private_key.privatekey.private_key_pem}”
host = aws_instance.web.public_ip
}

ami - (Required) The AMI to use for the instance.

availability_zone - (Optional) The AZ to start the instance in.

instance_type - (Required) The type of instance to start. Updates to this field will trigger a stop/start of the EC2 instance.

key_name - (Optional) The key name of the Key Pair to use for the instance; which can be managed using the aws_key_pair resource.

security_groups - (Optional, EC2-Classic and default VPC only) A list of security group names (EC2-Classic) or IDs (default VPC) to associate with.

Step4:- Launch Webserver and PHP

provisioner “remote-exec” {
inline = [
“sudo yum install httpd php git -y”,
“sudo systemctl restart httpd”,
“sudo systemctl enable httpd”,
]
}
tags = {
Name = “LINUX”
}
}

step4:- Create EBS Volumes

EBS volumes are like a hard disk or storage device, used to store data permanently in the cloud.

resource "aws_ebs_volume" "esb1" {
  availability_zone = aws_instance.web.availability_zone
  size              = 1

  tags = {
    Name = "mypd"
  }
}

availability_zone - (Required) The AZ where the EBS volume will exist.

size - (Optional) The size of the drive in GiBs.

step5:-Attach the volumes

Provides an AWS EBS Volume Attachment as a top level resource, to attach and detach volumes from AWS Instances.

resource "aws_volume_attachment" "ebs_att" {
  device_name  = "/dev/sdh"
  volume_id    = aws_ebs_volume.esb1.id
  instance_id  = aws_instance.web.id
  force_detach = true
}

device_name - (Required) The device name to expose to the instance (for example, /dev/sdh or xvdh). See Device Naming on Linux Instances and Device Naming on Windows Instances for more information.

instance_id - (Required) ID of the Instance to attach to.

volume_id - (Required) ID of the Volume to be attached.

step6:- Connect to the EC2 instance

In this step we connect to the EC2 instance, mount the EBS volume on /var/www/html/, and clone the GitHub repository into the /var/www/html folder.

resource "null_resource" "remote2" {
  depends_on = [aws_volume_attachment.ebs_att]

  connection {
    type        = "ssh"
    user        = "ec2-user"
    private_key = tls_private_key.privatekey.private_key_pem
    host        = aws_instance.web.public_ip
  }

  provisioner "remote-exec" {
    inline = [
      "sudo mkfs.ext4 /dev/xvdh",
      "sudo mount /dev/xvdh /var/www/html",
      "sudo rm -rf /var/www/html/*",
      "sudo git clone https://github.com/imneeteeshyadav98/hmc_t1.git /var/www/html/",
    ]
  }
}

step7:-Create S3 bucket

An S3 bucket is used to store data. Bucket names must be globally unique.

resource "aws_s3_bucket" "neeteshbucket1234" {
  bucket = "neeteshbucket1234"
  acl    = "public-read"

  versioning {
    enabled = true
  }
}

bucket - (Optional, Forces new resource) The name of the bucket. If omitted, Terraform will assign a random, unique name

acl - (Optional) The canned ACL to apply. Defaults to "private". Conflicts with grant.

step8:-Upload image on S3 Bucket

resource "aws_s3_bucket_object" "fileupload" {
  key    = "static_images"
  bucket = aws_s3_bucket.neeteshbucket1234.id
  acl    = "public-read"
  source = "1.png"
  etag   = filemd5("1.png")
}

locals {
  s3_origin_id = "myS3Origin"
}

bucket - (Required) The name of the bucket to put the file in. Alternatively, an S3 access point ARN can be specified.

key - (Required) The name of the object once it is in the bucket.

source - (Optional, conflicts with content and content_base64) The path to a file that will be read and uploaded as raw bytes for the object content.

etag - (Optional) Used to trigger updates. The only meaningful value is ${filemd5("path/to/file")} (Terraform 0.11.12 or later) or ${md5(file("path/to/file"))} (Terraform 0.11.11 or earlier). This attribute is not compatible with KMS encryption, kms_key_id or server_side_encryption = "aws:kms".

step9:-Create cloud front

CloudFront distributions, see the Amazon CloudFront Developer Guide. For specific information about creating CloudFront web distributions, see the POST Distribution page in the Amazon CloudFront API Reference.

resource “aws_cloudfront_distribution” “s3_distribution” {
origin {
domain_name = “${aws_s3_bucket.neeteshbucket1234.bucket_regional_domain_name}”
origin_id = “${local.s3_origin_id}”
}

enabled = true
is_ipv6_enabled = true
comment = “Imges”
default_root_object = “static_images”
default_cache_behavior {
allowed_methods = [“DELETE”, “GET”, “HEAD”, “OPTIONS”, “PATCH”, “POST”, “PUT”]
cached_methods = [“GET”, “HEAD”]
target_origin_id = “${local.s3_origin_id}”

forwarded_values {
query_string = false

cookies {
forward = “none”
}
}
viewer_protocol_policy = “allow-all”
min_ttl = 0
default_ttl = 3600
max_ttl = 86400
}
ordered_cache_behavior {
path_pattern = “/content/immutable/*”
allowed_methods = [“GET”, “HEAD”, “OPTIONS”]
cached_methods = [“GET”, “HEAD”, “OPTIONS”]
target_origin_id = “${local.s3_origin_id}”

forwarded_values {
query_string = false
headers = [“Origin”]

cookies {
forward = “none”
}
}

min_ttl = 0
default_ttl = 86400
max_ttl = 31536000
compress = true
viewer_protocol_policy = “redirect-to-https”
}

//# Cache behavior with precedence 1
ordered_cache_behavior {
path_pattern = “/content/*”
allowed_methods = [“GET”, “HEAD”, “OPTIONS”]
cached_methods = [“GET”, “HEAD”]
target_origin_id = “${local.s3_origin_id}”

forwarded_values {
query_string = false

cookies {
forward = “none”
}
}

min_ttl = 0
default_ttl = 3600
max_ttl = 86400
compress = true
viewer_protocol_policy = “redirect-to-https”
}

price_class = “PriceClass_200”

restrictions {
geo_restriction {
restriction_type = “whitelist”
locations = [“IN”]
}
}

tags = {
Environment = “production”
}

viewer_certificate {
cloudfront_default_certificate = true
}

step10:- Update Github code and connect the instance and launch the firefox

connection{
type=”ssh”
user= “ec2-user”
private_key =”${tls_private_key.privatekey.private_key_pem}”
host = aws_instance.web.public_ip
}
provisioner “remote-exec” {

inline = [

// “sudo su << \”EOF\” \n echo \”<img src=’${self.domain_name}’>\” >> /var/www/html/index.php \n \”EOF\””

“sudo su << EOF”,

“echo \”<center><img src=’http://${self.domain_name}/${aws_s3_bucket_object.fileupload.key}' height=’100px’ width=’200px’></center>\” >> /var/www/html/index.php”,

“EOF”

]

}
}

resource "null_resource" "nulllocal1" {
  depends_on = [
    null_resource.remote2,
    aws_cloudfront_distribution.s3_distribution,
  ]

  provisioner "local-exec" {
    command = "firefox ${aws_instance.web.public_ip}"
  }
}

--

--

Neeteesh Yadav
Neeteesh Yadav

Written by Neeteesh Yadav

Technical Enthusiast | MlOps(Machine learning + Operations)| DevOps Assembly Line| Hybrid Multi cloud

No responses yet