AWS
Fetching EC2 Metadata
Updated: December 11, 2024
# Request an IMDSv2 session token (TTL 21600 s = 6 h), then use it to list metadata.
# -s on the first curl suppresses the progress meter that would otherwise be printed;
# $(...) replaces the legacy backtick command substitution.
TOKEN=$(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") && curl -s -H "X-aws-ec2-metadata-token: $TOKEN" http://169.254.169.254/latest/meta-data
The above lists the metadata fields that are available; append a field path to the URL (e.g. "meta-data/hostname" or "meta-data/placement/region").
AWS CLI
Updated: August 31, 2024
# Square brackets mark optional parts: pick sudo or --user depending on the environment.
# NOTE(review): AWS now recommends the bundled AWS CLI v2 installer over pip -- confirm.
[sudo] python -m pip install [--user] [--upgrade] awscli [--ignore-installed six]
Set credentials with:
aws configure / set AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY / create ~/.aws/credentials
# S3 commands
aws s3 ls
# Minimal EC2 command
aws ec2 run-instances --image-id ami-031a2b1167c7825e1 --instance-type t3.micro --key-name "keypair1" --region me-central-1
# Security Group and EC2 commands
# Capture the new group's ID with $(...) command substitution; the original
# NEWGROUP=(aws ...) was a bash array assignment and never ran the command,
# and its "| jq" sat outside the assignment entirely.
NEWGROUP=$(aws ec2 create-security-group --group-name nitin --description "nitin security" --vpc-id vpc-01234567 | jq -r .GroupId)
# Allow members of the group to reach each other on TCP 27000-28000
aws ec2 authorize-security-group-ingress --group-id $NEWGROUP --protocol tcp --port 27000-28000 --source-group $NEWGROUP
# $SUBNET and $TAGS are assumed to be set beforehand
aws ec2 run-instances --image-id ami-0123456789 \
    --count 1 \
    --instance-type t2.small \
    --key-name jlp-tradecraft \
    --security-group-ids $NEWGROUP \
    --subnet-id $SUBNET \
    --block-device-mappings '[{"DeviceName": "/dev/xvda", "Ebs": {"DeleteOnTermination": true, "Iops": 500, "VolumeSize": 25, "VolumeType": "io1"}}]' \
    --tag-specifications "$TAGS"
# List running instances tagged Owner=nitin (correct flag is --tag-specifications, plural)
aws ec2 describe-instances --filters "Name=tag:Owner,Values=nitin" "Name=instance-state-name,Values=running"
aws ec2 terminate-instances --instance-ids xxxxxxx
# Route 53 commands
# The change batch must be valid JSON: "Comment" is a sibling of "Changes"
# (the original was missing a comma after the array and had a trailing comma).
echo '{
  "Comment": "Some thing here",
  "Changes": [
    {
      "Action": "UPSERT",
      "ResourceRecordSet": {
        "Name": "something", "Type": "A", "TTL": 300,
        "ResourceRecords": [{"Value": "127.0.0.1"}]
      }
    }
  ]
}' > s3Change.json
aws route53 change-resource-record-sets --hosted-zone-id the-zone-id --change-batch file://s3Change.json
Terraform Provider
Updated: August 31, 2024
# The provider
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 4.19.0"
    }
  }
}
provider "aws" {
  region = "me-central-1"
  # Do not hard-code credentials here: leave these unset so the provider
  # falls back to the standard credential chain (AWS_ACCESS_KEY_ID /
  # AWS_SECRET_ACCESS_KEY env vars, ~/.aws/credentials, or an instance profile).
  # The original set both to "" which supplies empty credentials instead.
  # access_key = ""
  # secret_key = ""
}
# Networking
# VPC with a /16 address space; the public subnets below carve /24s out of it.
resource "aws_vpc" "main" {
cidr_block = "10.0.0.0/16"
tags = {
Name = "Project VPC"
}
}
# One subnet is created per CIDR entry (see aws_subnet.public_subnets).
variable "public_subnet_cidrs" {
type = list(string)
description = "Public Subnet CIDR values"
default = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
}
# AZs paired index-wise with the CIDRs above; must belong to the provider's region.
variable "azs" {
type = list(string)
description = "Availability Zones"
default = ["me-central-1a", "me-central-1b", "me-central-1c"]
}
# One public subnet per CIDR, paired index-wise with an AZ.
# element() wraps around if the two lists differ in length.
resource "aws_subnet" "public_subnets" {
count = length(var.public_subnet_cidrs)
vpc_id = aws_vpc.main.id
cidr_block = element(var.public_subnet_cidrs, count.index)
availability_zone = element(var.azs, count.index)
}
# Internet gateway giving the VPC a route to the public internet.
resource "aws_internet_gateway" "gw" {
vpc_id = aws_vpc.main.id
}
# Route table sending all non-local traffic (0.0.0.0/0) through the internet gateway.
resource "aws_route_table" "second_rt" {
vpc_id = aws_vpc.main.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.gw.id
}
}
# Attach every public subnet to the internet-facing route table above.
resource "aws_route_table_association" "public_subnet_asso" {
count = length(var.public_subnet_cidrs)
subnet_id = element(aws_subnet.public_subnets[*].id, count.index)
route_table_id = aws_route_table.second_rt.id
}
# This vpce ID goes into the Atlas PrivateLink.
# NOTE(review): route_table_ids implies a Gateway-type endpoint; Atlas
# PrivateLink normally needs an Interface endpoint (vpc_endpoint_type =
# "Interface" with subnet_ids/security_group_ids) -- confirm the service type.
resource "aws_vpc_endpoint" "pl-aws" {
  # Bare references replace the legacy "${...}" interpolation-only syntax,
  # deprecated since Terraform 0.12.
  vpc_id          = aws_vpc.main.id
  service_name    = "some_name"
  route_table_ids = [aws_route_table.second_rt.id]
}
# S3 Bucket
# NOTE(review): bucket names are globally unique -- "backup-bucket" is almost
# certainly taken; expect to change it.
resource "aws_s3_bucket" "BackupBucket" {
bucket = "backup-bucket"
}
# KMS
# KMS key; actual deletion is delayed 10 days after destroy is requested.
resource "aws_kms_key" "a" {
description = "KMS key 1"
deletion_window_in_days = 10
}
# Friendly alias for the KMS key declared above.
resource "aws_kms_alias" "atlas" {
  # The key resource in this file is named "a" (aws_kms_key.a); the original
  # referenced a non-existent aws_kms_key.atlas.
  target_key_id = aws_kms_key.a.key_id
  name          = "alias/atlas_1"
  #name = "alias/${local.resource_prefix}"
}
# IAM
# Role that a specific external role (arn:...:role/atlasRole1) may assume,
# gated by an external ID to prevent the confused-deputy problem.
resource "aws_iam_role" "grant_atlas_access_from_other_role" {
name = "test-role"
assume_role_policy = jsonencode({
Version = "2012-10-17",
Statement = [
{
Effect = "Allow",
Principal = {
AWS = "arn:aws:iam::123456789012:role/atlasRole1"
},
Action = "sts:AssumeRole",
Condition = {
StringEquals = {
# Caller must present this external ID in its AssumeRole request.
"sts:ExternalId" = "external-id-1"
}
}
}
]
})
}
# Inline policy letting the role locate the backup bucket and upload objects into it.
resource "aws_iam_role_policy" "grant_access_to_s3" {
  # IAM policy names may not contain spaces, and each inline policy on a role
  # needs a unique name (the original used "some name here" twice).
  name = "grant-access-to-s3"
  # Reference the role and bucket actually declared in this file; the original
  # pointed at undeclared aws_iam_role.grant_atlas_access_to_some_aws and
  # aws_s3_bucket.test_bucket.
  role   = aws_iam_role.grant_atlas_access_from_other_role.id
  policy = <<-EOF
  {
    "Version": "2012-10-17",
    "Statement": [
      {
        "Effect": "Allow",
        "Action": "s3:GetBucketLocation",
        "Resource": "arn:aws:s3:::${aws_s3_bucket.BackupBucket.bucket}"
      },
      {
        "Effect": "Allow",
        "Action": "s3:PutObject",
        "Resource": "arn:aws:s3:::${aws_s3_bucket.BackupBucket.bucket}/*"
      }
    ]
  }
  EOF
}
# Inline policy letting the role encrypt/decrypt with the KMS key above.
resource "aws_iam_role_policy" "grant_access_to_kms" {
  # Unique, space-free policy name (IAM rejects spaces; the original name
  # duplicated the S3 policy's name on the same role).
  name = "grant-access-to-kms"
  # Reference the role and key actually declared in this file
  # (grant_atlas_access_from_other_role, aws_kms_key.a); the original pointed
  # at undeclared resources and had a stray extra closing brace after the
  # resource, which has been removed.
  role   = aws_iam_role.grant_atlas_access_from_other_role.id
  policy = <<-EOF
  {
    "Version": "2012-10-17",
    "Statement": [
      {
        "Effect": "Allow",
        "Action": [
          "kms:Decrypt",
          "kms:Encrypt",
          "kms:DescribeKey"
        ],
        "Resource": [
          "${aws_kms_key.a.arn}"
        ]
      }
    ]
  }
  EOF
}
# Public IP of the EC2 instance.
# NOTE(review): aws_instance.example_Server is not declared in this file --
# presumably defined elsewhere; confirm.
output "ip" {
value = aws_instance.example_Server.public_ip
description = "ip"
}
# Write "<public_dns> <public_ip>" to hosts.txt via the hashicorp/local provider.
# NOTE(review): aws_instance.example_Server is not declared in this file -- confirm.
resource "local_file" "host" {
content = <<-EOF
${aws_instance.example_Server.public_dns} ${aws_instance.example_Server.public_ip}
EOF
filename = "hosts.txt"
}
AWS Vault
Updated: September 1, 2024
Start with "aws configure sso" and set up a profile for SSO.
# Goes in ~/.aws/config
[profile default]
sso_start_url = https://pockettheories.com/start
sso_region = us-east-1
sso_account_id = 0123456789
sso_role_name = SuperUser
region = us-east-1
Run "aws sso login" and confirm that it works.
Next, install aws-vault and run
aws-vault exec default -- [your command here]
You can also get the environment variables and use them in your code. aws-vault automatically gets you to login when the session expires.
Send Data to CloudWatch
Updated: November 24, 2024
import boto3

# CloudWatch client pinned to ap-south-1.
cloudwatch = boto3.client('cloudwatch', region_name='ap-south-1')

# Each MetricData entry must follow the PutMetricData MetricDatum schema
# (MetricName and Value at minimum); the original bare {'a': 1} fails
# boto3 parameter validation.
server_metrics = []
server_metrics.append({
    'MetricName': 'a',
    'Value': 1,
    'Unit': 'Count',
})

cloudwatch.put_metric_data(Namespace='something', MetricData=server_metrics)
EC2
Updated: August 31, 2024