aws terraform
commit 2e1763bd42, parent 504863fddd

.gitignore (vendored, 199 changed lines)
@@ -1,2 +1,197 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

# Local .terraform directories
**/.terraform/*

# .tfstate files
*.tfstate
*.tfstate.*

# Crash log files
crash.log
crash.*.log

# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json

# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json

# Include override files you do wish to add to version control using negated pattern
# !example_override.tf

# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*

# Ignore CLI configuration files
.terraformrc
terraform.rc

token.txt
lab4/4.1-aws/.terraform.lock.hcl (new file, 41 lines)
@@ -0,0 +1,41 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.

provider "registry.terraform.io/hashicorp/aws" {
  version = "4.51.0"
  hashes = [
    "h1:1dDOWECxyLkSaaw+j7RwnCrHvL0e6UTxPItiPoAG+k4=",
    "zh:10aebfd8f22f7a69a2fcfcf35cb17ebbc0966ac8e822a9a0e1c843e429389de7",
    "zh:26661203ab083ec35a5ae2d9b516793d98f4380655bcd304724af7495aaa7c09",
    "zh:27ad57b820666a252e64959a4369fe9e40df5bf5d37a6ee272cc9131a501448f",
    "zh:5d7b1acfae1a7835509d8f501e4e731cac2246b9f5b6674b643790d6eaca8037",
    "zh:62cb21d9c90fd6b7af5c4b4d47f3e2908c3c7087809f3faeb561c4a12b14cbf5",
    "zh:731490aa24ffe958e23f67619b897c96178a1c628453b02727c69f15dc90ff7b",
    "zh:7b85f3513da571db05ad43f6a1ba195ab33ce8284f03537b636b561c1ad43075",
    "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425",
    "zh:9d8f3b176046f438768d8b1ba25d2bb8d234499a2c2c8fa0e1336c84ae708c41",
    "zh:bec27993d9501fb84df58d7f0b3eed2c8243e61f3676e66de17a9e2a8ff2f5be",
    "zh:cd27798991a86b44adab3969db96666dde12309caf55e40bffe90bd895a6edd3",
    "zh:dba49fdf4339a941357944616b1bf79483c2bed31c235e4cd59698802c8d2fdb",
    "zh:ed1ccf97ec02191e0840ac4fdbd2da21eea661b9d7e11c0f98b71ab67a3d3718",
    "zh:edeb801e3c84e653dc3449a0b73b64d1ba167cef674863ae5106d9a063548c70",
    "zh:f680647b4fce3d7603a24ae69c32cf6664fb0182844d00f18ddae3d5d878441c",
  ]
}

provider "registry.terraform.io/hashicorp/template" {
  version = "2.2.0"
  hashes = [
    "h1:N2IXrfDhxid5pFKHISN9V23ptHJyFZEdojUegdx5YkM=",
    "zh:01702196f0a0492ec07917db7aaa595843d8f171dc195f4c988d2ffca2a06386",
    "zh:09aae3da826ba3d7df69efeb25d146a1de0d03e951d35019a0f80e4f58c89b53",
    "zh:09ba83c0625b6fe0a954da6fbd0c355ac0b7f07f86c91a2a97849140fea49603",
    "zh:0e3a6c8e16f17f19010accd0844187d524580d9fdb0731f675ffcf4afba03d16",
    "zh:45f2c594b6f2f34ea663704cc72048b212fe7d16fb4cfd959365fa997228a776",
    "zh:77ea3e5a0446784d77114b5e851c970a3dde1e08fa6de38210b8385d7605d451",
    "zh:8a154388f3708e3df5a69122a23bdfaf760a523788a5081976b3d5616f7d30ae",
    "zh:992843002f2db5a11e626b3fc23dc0c87ad3729b3b3cff08e32ffb3df97edbde",
    "zh:ad906f4cebd3ec5e43d5cd6dc8f4c5c9cc3b33d2243c89c5fc18f97f7277b51d",
    "zh:c979425ddb256511137ecd093e23283234da0154b7fa8b21c2687182d9aea8b2",
  ]
}
Binary file not shown.
Binary file not shown.
lab4/4.1-aws/Dockerfile (new file, 7 lines)
@@ -0,0 +1,7 @@
FROM hashicorp/terraform:latest

COPY . .
RUN mkdir ~/.aws
RUN cp credentials ~/.aws

ENTRYPOINT terraform init && terraform apply -auto-approve && sleep 300 && terraform destroy -auto-approve
lab4/4.1-aws/deploy.sh (new file, 5 lines)
@@ -0,0 +1,5 @@
#!/bin/bash
cp ~/.aws/credentials ./credentials
docker build -t aws-4-1 .
docker run aws-4-1
rm credentials
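Note: deploy.sh copies the local AWS credentials file next to the Dockerfile so the build can bake it into the image, then removes the local copy. As a hedged, illustrative sketch that is not part of this commit, a small pre-flight check could confirm the credentials file and profile exist before the build; it assumes the standard AWS CLI INI layout and the "default" profile.

# Hypothetical pre-flight check, not part of this commit: confirm that
# ~/.aws/credentials has a usable profile before deploy.sh copies it into
# the build context. Assumes the standard AWS CLI INI layout and the
# "default" profile.
import configparser
import os
import sys

creds_path = os.path.expanduser("~/.aws/credentials")
config = configparser.ConfigParser()
if not config.read(creds_path):
    sys.exit(f"no credentials file at {creds_path}; run `aws configure` first")

profile = "default"  # assumption: deploy.sh relies on the default profile
if not config.has_section(profile) or not config.has_option(profile, "aws_access_key_id"):
    sys.exit(f"profile [{profile}] is missing aws_access_key_id")
print(f"profile [{profile}] looks usable")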
lab4/4.1-aws/main.tf (new file, 145 lines)
@@ -0,0 +1,145 @@
provider "aws" {
  region = "us-east-1"
}

resource "aws_vpc" "main" {
  cidr_block = "10.0.0.0/16"

  tags = {
    Name = "cloud-vpc"
  }
}

resource "aws_subnet" "public_subnet" {
  vpc_id                  = aws_vpc.main.id
  cidr_block              = "10.0.0.0/24"
  availability_zone       = "us-east-1a"
  map_public_ip_on_launch = true
  tags = {
    Name = "cloud-subnet"
  }
}

resource "aws_internet_gateway" "ig" {
  vpc_id = aws_vpc.main.id
  tags = {
    Name = "cloud-igw"
  }
}

resource "aws_eip" "nat_eip" {
  vpc = true
}

resource "aws_nat_gateway" "nat" {
  allocation_id = aws_eip.nat_eip.id
  subnet_id     = element(aws_subnet.public_subnet.*.id, 0)
  tags = {
    Name = "cloud-nat"
  }
}

resource "aws_route_table" "public" {
  vpc_id = aws_vpc.main.id
  tags = {
    Name = "cloud-public-route-table"
  }
}

resource "aws_route" "public_internet_gateway" {
  route_table_id         = aws_route_table.public.id
  destination_cidr_block = "0.0.0.0/0"
  gateway_id             = aws_internet_gateway.ig.id
}

resource "aws_route_table_association" "public" {
  subnet_id      = aws_subnet.public_subnet.id
  route_table_id = aws_route_table.public.id
}

resource "aws_security_group" "default" {
  name        = "cloud-default-sg"
  description = "Default security group to allow inbound/outbound from the VPC"
  vpc_id      = aws_vpc.main.id

  ingress {
    description      = "HTTP from VPC"
    from_port        = 8080
    to_port          = 8080
    protocol         = "tcp"
    cidr_blocks      = ["0.0.0.0/0"]
    ipv6_cidr_blocks = ["::/0"]
  }

  egress {
    from_port        = 0
    to_port          = 0
    protocol         = "-1"
    cidr_blocks      = ["0.0.0.0/0"]
    ipv6_cidr_blocks = ["::/0"]
  }
}

data "template_file" "user_data_server" {
  template = file("user_data")
}

resource "aws_instance" "server" {
  count                       = 2
  ami                         = "ami-0778521d914d23bc1"
  instance_type               = "t2.micro"
  subnet_id                   = aws_subnet.public_subnet.id
  vpc_security_group_ids      = [aws_security_group.default.id]
  associate_public_ip_address = false
  user_data                   = data.template_file.user_data_server.rendered

  tags = {
    Name = "cloud-server-${count.index}"
  }
}

resource "aws_eip" "lb" {
  network_border_group = "us-east-1"
}

resource "aws_lb" "lb" {
  name               = "cloud-lb"
  internal           = false
  load_balancer_type = "network"
  subnet_mapping {
    subnet_id     = aws_subnet.public_subnet.id
    allocation_id = aws_eip.lb.id
  }
}

resource "aws_lb_target_group" "main" {
  name     = "tg-cloud"
  port     = 8080
  protocol = "TCP"
  vpc_id   = aws_vpc.main.id
}

resource "aws_lb_target_group_attachment" "group_attachment" {
  count            = 2
  target_group_arn = aws_lb_target_group.main.arn
  target_id        = aws_instance.server[count.index].id
  port             = 8080
}

resource "aws_lb_listener" "lb_listener" {
  load_balancer_arn = aws_lb.lb.arn
  port              = "8080"
  protocol          = "TCP"

  default_action {
    type             = "forward"
    target_group_arn = aws_lb_target_group.main.arn
  }
}

output "lb_ip" {
  value = "http://${aws_eip.lb.public_ip}:8080"
}
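Note: main.tf creates the VPC, one public subnet, two t2.micro instances tagged cloud-server-0 and cloud-server-1, and a network load balancer whose Elastic IP is exposed through the lb_ip output. For context only, and not part of the commit, the sketch below uses boto3 to list the instances by that Name tag; it assumes AWS credentials are already configured in the environment and targets the same us-east-1 region as the provider block.

# Illustrative sketch, not part of the commit: list the instances that
# main.tf tags as cloud-server-<index>. Assumes AWS credentials are already
# configured in the environment; the region matches the provider block.
import boto3

ec2 = boto3.client("ec2", region_name="us-east-1")
resp = ec2.describe_instances(
    Filters=[{"Name": "tag:Name", "Values": ["cloud-server-*"]}]
)
for reservation in resp["Reservations"]:
    for instance in reservation["Instances"]:
        name = next(
            (t["Value"] for t in instance.get("Tags", []) if t["Key"] == "Name"),
            "<unnamed>",
        )
        print(name, instance["InstanceId"], instance["State"]["Name"])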
lab4/4.1-aws/terraform.tfstate (new file, 9 lines)
@@ -0,0 +1,9 @@
{
  "version": 4,
  "terraform_version": "1.3.7",
  "serial": 1,
  "lineage": "ac82c3cf-72fa-6904-dcf0-eb0bf77eb839",
  "outputs": {},
  "resources": [],
  "check_results": null
}
lab4/4.1-aws/user_data (new file, 7 lines)
@@ -0,0 +1,7 @@
#cloud-config
packages:
  - git

runcmd:
  - git clone https://git.wmi.amu.edu.pl/s444417/dpzc
  - ./dpzc/lab4/4.1-hetzner/webservice
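Note: this cloud-config installs git, clones the course repository, and starts the prebuilt webservice, which the load-test script removed later in this diff queries at /factors/<n> on port 8080. A minimal smoke test might look like the hedged sketch below, which is not part of the commit; the base URL is a placeholder for the lb_ip output, and it assumes the service returns the sorted prime factors as JSON, as that load-test script expects.

# Hypothetical smoke test, not part of the commit. The webservice started by
# this cloud-config answers GET /factors/<n> on port 8080; the removed
# load-test script later in this diff compares the JSON body against the
# sorted prime factors it multiplied together.
import requests

base_url = "http://example.invalid:8080"  # placeholder for the lb_ip output
n = 2 * 3 * 5 * 7  # 210, whose sorted prime factors are [2, 3, 5, 7]
resp = requests.get(f"{base_url}/factors/{n}", timeout=10)
resp.raise_for_status()
assert resp.json() == [2, 3, 5, 7], "unexpected factorization"
print(resp.json())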
Binary file not shown.
Binary file not shown.
Deleted file (384 lines)
@@ -1,384 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

from boto3_helper import *

# Check if the user has the Access & Secret key configured
import boto3
from boto3 import Session

session = init_aws_session()
# credentials = session.get_credentials()
# current_credentials = credentials.get_frozen_credentials()
#
# # Break & exit if either key is not present
# if current_credentials.access_key is None:
#     print("Access Key missing, use `aws configure` to setup")
#     exit()
#
# if current_credentials.secret_key is None:
#     print("Secret Key missing, use `aws configure` to setup")
#     exit()
#
# VPC design for multi-AZ deployments
globalVars = {}
globalVars['REGION_NAME'] = "ap-south-1"
globalVars['AZ1'] = "ap-south-1a"
globalVars['AZ2'] = "ap-south-1b"
globalVars['CIDRange'] = "10.240.0.0/23"
globalVars['az1_pvtsubnet_CIDRange'] = "10.240.0.0/25"
globalVars['az1_pubsubnet_CIDRange'] = "10.240.0.128/26"
globalVars['az1_sparesubnet_CIDRange'] = "10.240.0.192/26"
globalVars['az2_pvtsubnet_CIDRange'] = "10.240.1.0/25"
globalVars['az2_pubsubnet_CIDRange'] = "10.240.1.128/26"
globalVars['az2_sparesubnet_CIDRange'] = "10.240.1.192/26"
globalVars['Project'] = {'Key': 'Name', 'Value': 'AutoScaling-Demo'}
globalVars['tags'] = [{'Key': 'Owner', 'Value': 'Miztiik'},
                      {'Key': 'Environment', 'Value': 'Test'},
                      {'Key': 'Department', 'Value': 'Valaxy-Training'}]
# EC2 Parameters
globalVars['EC2-RH-AMI-ID'] = "ami-cdbdd7a2"
globalVars['EC2-Amazon-AMI-ID'] = "ami-3c89f653"
globalVars['EC2-InstanceType'] = "t2.micro"
globalVars['EC2-KeyName'] = globalVars['Project']['Value'] + '-Key'

# AutoScaling Parameters
globalVars['ASG-LaunchConfigName'] = "ASG-Demo-LaunchConfig"
globalVars['ASG-AutoScalingGroupName'] = "ASG-Demo-AutoScalingGrp"

# RDS Parameters
globalVars['RDS-DBIdentifier'] = "ProdDb01"
globalVars['RDS-Engine'] = "mysql"
globalVars['RDS-DBName'] = "WordPressDB"
globalVars['RDS-DBMasterUserName'] = "WpDdMasterUsr"
globalVars['RDS-DBMasterUserPass'] = "WpDdMasterUsrPass"
globalVars['RDS-DBInstanceClass'] = "db.t2.micro"
globalVars['RDS-DBSubnetGroup'] = "RDS-WP-DB-Subnet-Group"

# Creating a VPC, Subnets, and a Gateway
ec2 = boto3.resource('ec2', region_name=globalVars['REGION_NAME'])
ec2Client = boto3.client('ec2', region_name=globalVars['REGION_NAME'])
vpc = ec2.create_vpc(CidrBlock=globalVars['CIDRange'])
asgClient = boto3.client('autoscaling', region_name=globalVars['REGION_NAME'])
rds = boto3.client('rds', region_name=globalVars['REGION_NAME'])

# AZ1 Subnets
az1_pvtsubnet = vpc.create_subnet(CidrBlock=globalVars['az1_pvtsubnet_CIDRange'], AvailabilityZone=globalVars['AZ1'])
az1_pubsubnet = vpc.create_subnet(CidrBlock=globalVars['az1_pubsubnet_CIDRange'], AvailabilityZone=globalVars['AZ1'])
az1_sparesubnet = vpc.create_subnet(CidrBlock=globalVars['az1_sparesubnet_CIDRange'], AvailabilityZone=globalVars['AZ1'])
# AZ2 Subnets
az2_pvtsubnet = vpc.create_subnet(CidrBlock=globalVars['az2_pvtsubnet_CIDRange'], AvailabilityZone=globalVars['AZ2'])
az2_pubsubnet = vpc.create_subnet(CidrBlock=globalVars['az2_pubsubnet_CIDRange'], AvailabilityZone=globalVars['AZ2'])
az2_sparesubnet = vpc.create_subnet(CidrBlock=globalVars['az2_sparesubnet_CIDRange'], AvailabilityZone=globalVars['AZ2'])

# Enable DNS hostnames in the VPC
vpc.modify_attribute(EnableDnsSupport={'Value': True})
vpc.modify_attribute(EnableDnsHostnames={'Value': True})

# Create the Internet Gateway & attach it to the VPC
intGateway = ec2.create_internet_gateway()
intGateway.attach_to_vpc(VpcId=vpc.id)

# Create a route table for public & private traffic
routeTable = ec2.create_route_table(VpcId=vpc.id)
rtbAssn = []
rtbAssn.append(routeTable.associate_with_subnet(SubnetId=az1_pubsubnet.id))
rtbAssn.append(routeTable.associate_with_subnet(SubnetId=az1_pvtsubnet.id))
rtbAssn.append(routeTable.associate_with_subnet(SubnetId=az2_pubsubnet.id))
rtbAssn.append(routeTable.associate_with_subnet(SubnetId=az2_pvtsubnet.id))

# Create a route for internet traffic to flow out
intRoute = ec2Client.create_route(RouteTableId=routeTable.id, DestinationCidrBlock='0.0.0.0/0', GatewayId=intGateway.id)

# Tag the resources
vpc.create_tags(Tags=globalVars['tags'])
az1_pvtsubnet.create_tags(Tags=globalVars['tags'])
az1_pubsubnet.create_tags(Tags=globalVars['tags'])
az1_sparesubnet.create_tags(Tags=globalVars['tags'])
az2_pvtsubnet.create_tags(Tags=globalVars['tags'])
az2_pubsubnet.create_tags(Tags=globalVars['tags'])
az2_sparesubnet.create_tags(Tags=globalVars['tags'])
intGateway.create_tags(Tags=globalVars['tags'])
routeTable.create_tags(Tags=globalVars['tags'])

vpc.create_tags(Tags=[{'Key': 'Name', 'Value': globalVars['Project']['Value'] + '-vpc'}])
az1_pvtsubnet.create_tags(Tags=[{'Key': 'Name', 'Value': globalVars['Project']['Value'] + '-az1-private-subnet'}])
az1_pubsubnet.create_tags(Tags=[{'Key': 'Name', 'Value': globalVars['Project']['Value'] + '-az1-public-subnet'}])
az1_sparesubnet.create_tags(Tags=[{'Key': 'Name', 'Value': globalVars['Project']['Value'] + '-az1-spare-subnet'}])
az2_pvtsubnet.create_tags(Tags=[{'Key': 'Name', 'Value': globalVars['Project']['Value'] + '-az2-private-subnet'}])
az2_pubsubnet.create_tags(Tags=[{'Key': 'Name', 'Value': globalVars['Project']['Value'] + '-az2-public-subnet'}])
az2_sparesubnet.create_tags(Tags=[{'Key': 'Name', 'Value': globalVars['Project']['Value'] + '-az2-spare-subnet'}])
intGateway.create_tags(Tags=[{'Key': 'Name', 'Value': globalVars['Project']['Value'] + '-igw'}])
routeTable.create_tags(Tags=[{'Key': 'Name', 'Value': globalVars['Project']['Value'] + '-rtb'}])

# Let's create the public & private security groups
elbSecGrp = ec2.create_security_group(DryRun=False,
                                      GroupName='elbSecGrp',
                                      Description='ElasticLoadBalancer_Security_Group',
                                      VpcId=vpc.id
                                      )

pubSecGrp = ec2.create_security_group(DryRun=False,
                                      GroupName='pubSecGrp',
                                      Description='Public_Security_Group',
                                      VpcId=vpc.id
                                      )

pvtSecGrp = ec2.create_security_group(DryRun=False,
                                      GroupName='pvtSecGrp',
                                      Description='Private_Security_Group',
                                      VpcId=vpc.id
                                      )

elbSecGrp.create_tags(Tags=globalVars['tags'])
pubSecGrp.create_tags(Tags=globalVars['tags'])
pvtSecGrp.create_tags(Tags=globalVars['tags'])

elbSecGrp.create_tags(Tags=[{'Key': 'Name', 'Value': globalVars['Project']['Value'] + '-elb-security-group'}])
pubSecGrp.create_tags(Tags=[{'Key': 'Name', 'Value': globalVars['Project']['Value'] + '-public-security-group'}])
pvtSecGrp.create_tags(Tags=[{'Key': 'Name', 'Value': globalVars['Project']['Value'] + '-private-security-group'}])

# Add a rule that allows inbound SSH, HTTP, HTTPS traffic (from any source)
ec2Client.authorize_security_group_ingress(GroupId=elbSecGrp.id,
                                           IpProtocol='tcp',
                                           FromPort=80,
                                           ToPort=80,
                                           CidrIp='0.0.0.0/0'
                                           )

# Allow the public security group to receive traffic from the ELB security group
ec2Client.authorize_security_group_ingress(GroupId=pubSecGrp.id,
                                           IpPermissions=[{'IpProtocol': 'tcp',
                                                           'FromPort': 80,
                                                           'ToPort': 80,
                                                           'UserIdGroupPairs': [{'GroupId': elbSecGrp.id}]
                                                           }]
                                           )
# Allow the private security group to receive traffic from the application security group
ec2Client.authorize_security_group_ingress(GroupId=pvtSecGrp.id,
                                           IpPermissions=[{'IpProtocol': 'tcp',
                                                           'FromPort': 3306,
                                                           'ToPort': 3306,
                                                           'UserIdGroupPairs': [{'GroupId': pubSecGrp.id}]
                                                           }]
                                           )

ec2Client.authorize_security_group_ingress(GroupId=pubSecGrp.id,
                                           IpProtocol='tcp',
                                           FromPort=80,
                                           ToPort=80,
                                           CidrIp='0.0.0.0/0'
                                           )
ec2Client.authorize_security_group_ingress(GroupId=pubSecGrp.id,
                                           IpProtocol='tcp',
                                           FromPort=443,
                                           ToPort=443,
                                           CidrIp='0.0.0.0/0'
                                           )
ec2Client.authorize_security_group_ingress(GroupId=pubSecGrp.id,
                                           IpProtocol='tcp',
                                           FromPort=22,
                                           ToPort=22,
                                           CidrIp='0.0.0.0/0'
                                           )

# Let's create the key pair that we will use
## Check if the key is already present
customEC2Keys = ec2Client.describe_key_pairs()['KeyPairs']
if not next((key for key in customEC2Keys if key["KeyName"] == globalVars['EC2-KeyName']), False):
    ec2_key_pair = ec2.create_key_pair(KeyName=globalVars['EC2-KeyName'])
    print("New private key created, save the key material below\n\n")
    print(ec2_key_pair.key_material)

# Using the user data field, we will download, install & configure our basic WordPress website.
# The user-defined code to install WordPress and the web server, and configure them
userDataCode = """
#!/bin/bash
set -e -x
# Setting up the HTTP server
yum install -y httpd php php-mysql mysql-server
service httpd start
service mysqld start
chkconfig httpd on
groupadd www
usermod -a -G www ec2-user
# Download the WordPress site & move it to the web root
cd /var/www/
curl -O https://wordpress.org/latest.tar.gz && tar -zxf latest.tar.gz
rm -rf /var/www/html
mv wordpress /var/www/html
# Set the permissions
chown -R root:www /var/www
chmod 2775 /var/www
find /var/www -type d -exec chmod 2775 {} +
find /var/www -type f -exec chmod 0664 {} +
# SELinux permissive
# needed to make WordPress connect to the DB over the network
# setsebool -P httpd_can_network_connect=1
# setsebool httpd_can_network_connect_db on
service httpd restart
# Remove the file below after testing
echo "<?php phpinfo(); ?>" > /var/www/html/phptestinfo.php
"""

# Create the public WordPress instance
##### **DeviceIndex**: the network interface's position in the attachment order. For example, the first attached network interface has a DeviceIndex of 0.
instanceLst = ec2.create_instances(ImageId=globalVars['EC2-Amazon-AMI-ID'],
                                   MinCount=1,
                                   MaxCount=1,
                                   KeyName=globalVars['EC2-KeyName'],
                                   UserData=userDataCode,
                                   InstanceType=globalVars['EC2-InstanceType'],
                                   NetworkInterfaces=[
                                       {
                                           'SubnetId': az1_pubsubnet.id,
                                           'Groups': [pubSecGrp.id],
                                           'DeviceIndex': 0,
                                           'DeleteOnTermination': True,
                                           'AssociatePublicIpAddress': True,
                                       }
                                   ]
                                   )

# Create the Launch Configuration
# InstanceId = 'string'
asgLaunchConfig = asgClient.create_launch_configuration(
    LaunchConfigurationName=globalVars['ASG-LaunchConfigName'],
    ImageId=globalVars['EC2-Amazon-AMI-ID'],
    KeyName=globalVars['EC2-KeyName'],
    SecurityGroups=[pubSecGrp.id],
    UserData=userDataCode,
    InstanceType=globalVars['EC2-InstanceType'],
    InstanceMonitoring={'Enabled': False},
    EbsOptimized=False,
    AssociatePublicIpAddress=False
)

# Create the Auto Scaling group
ASGSubnets = az1_pubsubnet.id + "," + az2_pubsubnet.id
asGroup = asgClient.create_auto_scaling_group(
    AutoScalingGroupName=globalVars['ASG-AutoScalingGroupName'],
    LaunchConfigurationName=globalVars['ASG-LaunchConfigName'],
    MinSize=1,
    MaxSize=3,
    DesiredCapacity=2,
    DefaultCooldown=120,
    HealthCheckType='EC2',
    HealthCheckGracePeriod=60,
    Tags=globalVars['tags'],
    VPCZoneIdentifier=ASGSubnets
)

asgClient.create_or_update_tags(
    Tags=[
        {
            'ResourceId': globalVars['ASG-AutoScalingGroupName'],
            'ResourceType': 'auto-scaling-group',
            'Key': 'Name',
            'Value': globalVars['Project']['Value'] + '-ASG-Group',
            'PropagateAtLaunch': True
        },
    ]
)

## First, let's create the RDS subnet group
rdsDBSubnetGrp = rds.create_db_subnet_group(DBSubnetGroupName=globalVars['RDS-DBSubnetGroup'],
                                            DBSubnetGroupDescription=globalVars['RDS-DBSubnetGroup'],
                                            SubnetIds=[az1_pvtsubnet.id, az2_pvtsubnet.id],
                                            Tags=[{'Key': 'Name',
                                                   'Value': globalVars['Project']['Value'] + '-DB-Subnet-Group'}]
                                            )

rdsInstance = rds.create_db_instance(DBInstanceIdentifier=globalVars['RDS-DBIdentifier'],
                                     AllocatedStorage=5,
                                     DBName=globalVars['RDS-DBName'],
                                     Engine=globalVars['RDS-Engine'],
                                     Port=3306,
                                     # General purpose SSD
                                     StorageType='gp2',
                                     StorageEncrypted=False,
                                     AutoMinorVersionUpgrade=False,
                                     # Set this to true later?
                                     MultiAZ=False,
                                     MasterUsername=globalVars['RDS-DBMasterUserName'],
                                     MasterUserPassword=globalVars['RDS-DBMasterUserPass'],
                                     DBInstanceClass=globalVars['RDS-DBInstanceClass'],
                                     VpcSecurityGroupIds=[pvtSecGrp.id],
                                     DBSubnetGroupName=globalVars['RDS-DBSubnetGroup'],
                                     CopyTagsToSnapshot=True,
                                     Tags=[{'Key': 'Name', 'Value': globalVars['Project']['Value'] + '-RDS-Instance'}])

# Let's wait until the RDS instance is created successfully
## Polls RDS.Client.describe_db_instances() every 30 seconds until a successful state is reached. An error is returned after 60 failed checks.

waiter = rds.get_waiter('db_instance_available')
waiter.wait(DBInstanceIdentifier=globalVars['RDS-DBIdentifier'])

resp = rds.describe_db_instances(DBInstanceIdentifier=globalVars['RDS-DBIdentifier'])
db_instances = resp['DBInstances']
if len(db_instances) != 1:
    raise Exception('Whoa!!! More than one DB instance returned; this should not have happened')
db_instance = db_instances[0]
status = db_instance['DBInstanceStatus']
if status == 'available':
    rdsEndpointDict = db_instance['Endpoint']
    globalVars['Endpoint'] = rdsEndpointDict['Address']
    # port = endpoint['Port']


###### Print to Screen ########
print("VPC ID                    : {0}".format(vpc.id))
print("AZ1 Public Subnet ID      : {0}".format(az1_pubsubnet.id))
print("AZ1 Private Subnet ID     : {0}".format(az1_pvtsubnet.id))
print("AZ1 Spare Subnet ID       : {0}".format(az1_sparesubnet.id))
print("Internet Gateway ID       : {0}".format(intGateway.id))
print("Route Table ID            : {0}".format(routeTable.id))
print("Public Security Group ID  : {0}".format(pubSecGrp.id))
print("Private Security Group ID : {0}".format(pvtSecGrp.id))
print("EC2 Key Pair              : {0}".format(globalVars['EC2-KeyName']))
print("EC2 PublicIP              : {0}".format(globalVars['EC2-KeyName']))
print("RDS Endpoint              : {0}".format(globalVars['Endpoint']))
###### Print to Screen ########


"""
Function to clean up all the resources
"""
def cleanAll(resourcesDict=None):
    # Delete the RDS instance and wait until it is gone
    rds.delete_db_instance(DBInstanceIdentifier=globalVars['RDS-DBIdentifier'], SkipFinalSnapshot=True)
    waiter = rds.get_waiter('db_instance_deleted')
    waiter.wait(DBInstanceIdentifier=globalVars['RDS-DBIdentifier'])
    # Delete the instances
    ids = []
    for i in instanceLst:
        ids.append(i.id)

    ec2.instances.filter(InstanceIds=ids).terminate()

    # Wait for the instances to be terminated
    waiter = ec2Client.get_waiter('instance_terminated')
    waiter.wait(InstanceIds=ids)
    ec2Client.delete_key_pair(KeyName=globalVars['EC2-KeyName'])

    # Delete routes & the route table
    for assn in rtbAssn:
        ec2Client.disassociate_route_table(AssociationId=assn.id)

    routeTable.delete()

    # Delete subnets
    az1_pvtsubnet.delete()
    az1_pubsubnet.delete()
    az1_sparesubnet.delete()

    # Detach & delete the internet gateway
    ec2Client.detach_internet_gateway(InternetGatewayId=intGateway.id, VpcId=vpc.id)
    intGateway.delete()

    # Delete security groups
    pubSecGrp.delete()
    pvtSecGrp.delete()

    vpc.delete()
Deleted file (42 lines)
@@ -1,42 +0,0 @@
import pprint
from boto3_helper import *

# find a security group that allows port 80 and tcp
security_groups = ec2_get_security_group_list()
for security_group in security_groups:
    security_group_ip_perms = security_group['IpPermissions']
    for security_group_ip_perm in security_group_ip_perms:
        if security_group_ip_perm['IpProtocol'] == 'tcp' and security_group_ip_perm['FromPort'] == 80:
            vpc_id = security_group['VpcId']
            security_group_id = security_group['GroupId']
            break

# find the subnets and VPC ID associated with the security group
subnet_list = ec2_get_subnet_list()
subnet_id_list = []
for subnet in subnet_list:
    if subnet['VpcId'] == vpc_id:
        subnet_id_list.append(subnet['SubnetId'])

print(subnet_id_list, vpc_id, security_group_id)

# Create a target group
target_group = elb_create_target_group('unbiased-coder-target-group', vpc_id)
target_group_arn = target_group['TargetGroups'][0]['TargetGroupArn']

session = init_aws_session()
elb = session.client('elbv2')

response = elb.create_load_balancer(
    Name='UnbiasedCoderLoadBalancer',
    Subnets=subnet_id_list,
    SecurityGroups=[
        security_group_id,
    ],
    Scheme='internal',
    Type='application',
    IpAddressType='ipv4',
)
pprint.pprint(response)
Deleted file (7 lines)
@@ -1,7 +0,0 @@
import pprint
from boto3_helper import init_aws_session

session = init_aws_session()
elb = session.client('elbv2')
response = elb.describe_load_balancers()
pprint.pprint(response['LoadBalancers'])
Deleted file (40 lines)
@@ -1,40 +0,0 @@
import os
from urllib import response
import boto3
import pprint
from dotenv import load_dotenv

load_dotenv(dotenv_path="lab4/.env")

def get_aws_keys():
    return os.getenv('aws_access_key_id'), os.getenv('aws_secret_access_key')

def init_aws_session():
    aws_access_key_id, aws_secret_access_key = get_aws_keys()
    return boto3.Session(aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, aws_session_token=os.getenv("aws_session_token"), region_name=os.getenv('AWS_REGION'))

def ec2_get_vpc_list():
    session = init_aws_session()
    ec2 = session.client('ec2')
    response = ec2.describe_vpcs()
    return response['Vpcs']

def elb_create_target_group(target_group_name, vpc_id):
    session = init_aws_session()
    elb = session.client('elbv2')
    response = elb.create_target_group(Name=target_group_name, Protocol='HTTP', Port=80, VpcId=vpc_id)
    return response

def ec2_get_subnet_list():
    session = init_aws_session()
    ec2 = session.client('ec2')
    response = ec2.describe_subnets()
    return response['Subnets']

def ec2_get_security_group_list():
    session = init_aws_session()
    ec2 = session.client('ec2')
    response = ec2.describe_security_groups()
    return response['SecurityGroups']

# src: https://unbiased-coder.com/boto3-load-balancer-guide/
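Note, for context only: the removed scripts above and below consumed this helper module roughly as in the following sketch, which is not part of the repository. It assumes lab4/.env defines aws_access_key_id, aws_secret_access_key (and optionally aws_session_token) plus AWS_REGION, exactly as boto3_helper reads them.

# For context only, not part of the repository: how the removed scripts used
# this helper before the commit replaced them with Terraform.
from boto3_helper import init_aws_session, ec2_get_vpc_list

session = init_aws_session()
print("region:", session.region_name)
print("VPC IDs:", [vpc["VpcId"] for vpc in ec2_get_vpc_list()])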
Deleted file (9 lines)
@@ -1,9 +0,0 @@
import os
import boto.ec2.elb
from dotenv import load_dotenv

load_dotenv(dotenv_path="lab4/.env")
region_name = os.getenv('AWS_REGION')
elb = boto.ec2.elb.connect_to_region(region_name)

print(elb)
Deleted file (128 lines)
@@ -1,128 +0,0 @@
import requests
import random
import math
import time
import threading
import logging
logging.getLogger().setLevel(logging.INFO)


API_URL = "http://localhost:8080"


UNIT = 5.0  # secs

# Pre-generated primes
first_primes_list = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29,
                     31, 37, 41, 43, 47, 53, 59, 61, 67,
                     71, 73, 79, 83, 89, 97, 101, 103,
                     107, 109, 113, 127, 131, 137, 139,
                     149, 151, 157, 163, 167, 173, 179,
                     181, 191, 193, 197, 199, 211, 223,
                     227, 229, 233, 239, 241, 251, 257,
                     263, 269, 271, 277, 281, 283, 293,
                     307, 311, 313, 317, 331, 337, 347, 349]


def nBitRandom(n):
    return random.randrange(2**(n-1)+1, 2**n - 1)

def getLowLevelPrime(n):
    '''Generate a prime candidate not divisible
    by the first primes'''
    while True:
        # Obtain a random number
        pc = nBitRandom(n)

        # Test divisibility by pre-generated
        # primes
        for divisor in first_primes_list:
            if pc % divisor == 0 and divisor**2 <= pc:
                break
        else: return pc

def isMillerRabinPassed(mrc):
    '''Run 20 iterations of the Rabin-Miller primality test'''
    maxDivisionsByTwo = 0
    ec = mrc-1
    while ec % 2 == 0:
        ec >>= 1
        maxDivisionsByTwo += 1
    assert(2**maxDivisionsByTwo * ec == mrc-1)

    def trialComposite(round_tester):
        if pow(round_tester, ec, mrc) == 1:
            return False
        for i in range(maxDivisionsByTwo):
            if pow(round_tester, 2**i * ec, mrc) == mrc-1:
                return False
        return True

    # Set the number of trials here
    numberOfRabinTrials = 20
    for i in range(numberOfRabinTrials):
        round_tester = random.randrange(2, mrc)
        if trialComposite(round_tester):
            return False
    return True

def random_large_prime(bits):
    while True:
        prime_candidate = getLowLevelPrime(bits)
        if not isMillerRabinPassed(prime_candidate):
            continue
        else:
            return prime_candidate

def thread_function(i, fast, timeout):
    start = time.time()

    c = 5  # bits: 20: 200ms; 21: 350ms; 22: 700ms; 23: 1.5s; 25: 6s; 26: 10s; 27: 24s
    bits = 19 if fast else 23
    last_report = time.time()
    processing_time = 0.0
    reqs = 0
    while True:
        iter_start = time.time()
        if iter_start - start > timeout:
            logging.info("Thread: %d\treqs: %d\tmean time: %.3fs\t%s"%(i, reqs, processing_time/reqs if reqs>0 else 0.0, "fast\t" if fast else ""))
            results[i][iter_start] = processing_time/reqs if reqs>0 else 0.0
            return
        if iter_start - last_report > UNIT/2:
            if len(results[i])%2 == 0:
                logging.info("Thread: %d\treqs: %d\tmean time: %.3fs\t%s"%(i, reqs, processing_time/reqs if reqs>0 else 0.0, "fast\t" if fast else ""))
            results[i][iter_start] = processing_time/reqs if reqs>0 else 0.0
            processing_time = 0.0
            reqs = 0
            last_report = iter_start

        factors = [random_large_prime(bits) for i in range(c)]
        factors.sort()
        n = math.prod(factors)

        r = requests.get(API_URL+'/factors/%d'%(n))
        if r.status_code != 200:
            logging.error("wrong status code from webservice")
        else:
            result = r.json()
            if result != factors:
                logging.error("Wrong factors")

        processing_time += time.time() - iter_start
        reqs += 1
        time.sleep(0.5)

START = time.time()
slow_threads = 4

results = [{} for i in range(slow_threads+1)]

t0 = threading.Thread(target=thread_function, args=(0, True, (5 + slow_threads*3) * UNIT))
t0.start()
time.sleep(2 * UNIT)
for i in range(slow_threads):
    t = threading.Thread(target=thread_function, args=(i+1, False, (slow_threads-i) * 3 * UNIT))
    t.start()
    time.sleep(2 * UNIT)

t0.join()
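Note: the script above collects one mean-latency sample per reporting window into results[i], with index 0 for the fast thread and indices 1 to 4 for the slow threads. A possible post-processing step, not present in the original file, could summarize those samples once the threads have finished:

# Possible post-processing, not present in the original script: each
# results[i] maps a report timestamp to the mean request latency observed in
# that window, so a per-thread summary can be printed after t0.join().
for idx, samples in enumerate(results):
    if not samples:
        continue
    label = "fast" if idx == 0 else "slow"
    mean_of_means = sum(samples.values()) / len(samples)
    print(f"thread {idx} ({label}): {len(samples)} windows, "
          f"mean latency {mean_of_means:.3f}s")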