# kubernetes.tf

provider "aws" {
  region  = var.region
  profile = var.aws_profile
}

data "aws_availability_zones" "available" {}

# reserve Elastic IP to be used in our NAT gateway
resource "aws_eip" "nat_gw_elastic_ip" {
  vpc = true

  tags = merge(
    var.additional_tags,
    {
      Name = "${var.cluster_name}-nat-eip"
    },
  )
}

module "vpc" {
  source = "terraform-aws-modules/vpc/aws"

  name = "${var.name_prefix}-vpc"
  cidr = var.vpc_cidr
  azs  = data.aws_availability_zones.available.names

  private_subnets = [
    # this loop creates a list such as ["10.0.0.0/20", "10.0.16.0/20", "10.0.32.0/20", ...]
    # with one entry per available Availability Zone (see the worked example after this list)
    for zone_id in data.aws_availability_zones.available.zone_ids :
    cidrsubnet(var.vpc_cidr, var.subnet_prefix_extension, tonumber(substr(zone_id, length(zone_id) - 1, 1)) - 1)
  ]
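
  # Worked example (the values here are illustrative assumptions, not the variable defaults):
  # with var.vpc_cidr = "10.0.0.0/16" and var.subnet_prefix_extension = 4, a zone_id such as
  # "use1-az1" ends in "1", so the netnum is 1 - 1 = 0 and
  #   cidrsubnet("10.0.0.0/16", 4, 0) = "10.0.0.0/20"
  # "use1-az2" yields netnum 1 and "10.0.16.0/20", and so on for each additional zone.
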
  public_subnets = [
    # this loop creates a list such as ["10.0.128.0/20", "10.0.144.0/20", "10.0.160.0/20", ...]
    # with one entry per available Availability Zone
    # var.zone_offset shifts the netnums so these blocks do not collide with the private
    # subnet blocks (see the worked example after this list)
    for zone_id in data.aws_availability_zones.available.zone_ids :
    cidrsubnet(var.vpc_cidr, var.subnet_prefix_extension, tonumber(substr(zone_id, length(zone_id) - 1, 1)) + var.zone_offset - 1)
  ]
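
  # Worked example (same illustrative assumptions as above, plus var.zone_offset = 8):
  # for "use1-az1" the netnum is 1 + 8 - 1 = 8, so
  #   cidrsubnet("10.0.0.0/16", 4, 8) = "10.0.128.0/20"
  # which sits well clear of the private subnet blocks computed above.
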
  # enable a single NAT gateway to save some money
  # WARNING: this could create a single point of failure, since the NAT gateway lives in one AZ only
  # feel free to change these options if you need full availability (a per-AZ alternative is sketched below)
  # reference: https://registry.terraform.io/modules/terraform-aws-modules/vpc/aws/2.44.0#nat-gateway-scenarios
  enable_nat_gateway     = true
  single_nat_gateway     = true
  one_nat_gateway_per_az = false
  enable_dns_hostnames   = true
  reuse_nat_ips          = true
  external_nat_ip_ids    = [aws_eip.nat_gw_elastic_ip.id]
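
  # A highly-available sketch, assuming you are willing to pay for one NAT gateway per AZ
  # (and to drop the single reused EIP above):
  #
  #   enable_nat_gateway     = true
  #   single_nat_gateway     = false
  #   one_nat_gateway_per_az = true
  #   reuse_nat_ips          = false
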
  # add VPC/Subnet tags required by EKS
  tags = merge(
    var.additional_tags,
    {
      "kubernetes.io/cluster/${var.cluster_name}" = "shared"
    },
  )
  public_subnet_tags = merge(
    var.additional_tags,
    {
      "kubernetes.io/cluster/${var.cluster_name}" = "shared"
      "kubernetes.io/role/elb"                    = "1"
    },
  )
  private_subnet_tags = merge(
    var.additional_tags,
    {
      "kubernetes.io/cluster/${var.cluster_name}" = "shared"
      "kubernetes.io/role/internal-elb"           = "1"
    },
  )
}

locals {
  worker_groups_launch_template = [
    {
      override_instance_types = var.asg_instance_types
      spot_instance_pools     = 2
      asg_desired_capacity    = var.autoscaling_minimum_size_by_az * length(data.aws_availability_zones.available.zone_ids)
      asg_min_size            = var.autoscaling_minimum_size_by_az * length(data.aws_availability_zones.available.zone_ids)
      asg_max_size            = var.autoscaling_maximum_size_by_az * length(data.aws_availability_zones.available.zone_ids)
      kubelet_extra_args      = "--node-labels=node.kubernetes.io/lifecycle=spot"
      public_ip               = true
    },
  ]
}
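
# Worked sizing example (the numbers are illustrative assumptions, not the variable defaults):
# in a region with 3 Availability Zones, autoscaling_minimum_size_by_az = 1 and
# autoscaling_maximum_size_by_az = 5 give asg_min_size = asg_desired_capacity = 1 * 3 = 3
# and asg_max_size = 5 * 3 = 15 for the worker group defined above.
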
module "eks-cluster" {
source = "terraform-aws-modules/eks/aws"
version = "17.1.0"
cluster_name = var.cluster_name
cluster_version = "1.21"
write_kubeconfig = false
subnets = module.vpc.private_subnets
vpc_id = module.vpc.vpc_id
worker_groups_launch_template = local.worker_groups_launch_template
}
# get EKS cluster info to configure the Kubernetes and Helm providers
data "aws_eks_cluster" "cluster" {
  name = module.eks-cluster.cluster_id
}

data "aws_eks_cluster_auth" "cluster" {
  name = module.eks-cluster.cluster_id
}

# use EKS authentication so that Kubernetes objects can be managed from Terraform
provider "kubernetes" {
  host                   = data.aws_eks_cluster.cluster.endpoint
  cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
  token                  = data.aws_eks_cluster_auth.cluster.token
}

provider "helm" {
  kubernetes {
    host                   = data.aws_eks_cluster.cluster.endpoint
    cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
    token                  = data.aws_eks_cluster_auth.cluster.token
  }
}

resource "aws_iam_openid_connect_provider" "cluster" {
client_id_list = ["sts.amazonaws.com"]
thumbprint_list = []
url = data.aws_eks_cluster.cluster.identity.0.oidc.0.issuer
}
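
# The empty thumbprint_list above is left as-is; a common way to populate it, sketched here
# as an optional assumption rather than a required change, is the hashicorp/tls provider:
#
#   data "tls_certificate" "cluster_oidc" {
#     url = data.aws_eks_cluster.cluster.identity[0].oidc[0].issuer
#   }
#
# and then thumbprint_list = [data.tls_certificate.cluster_oidc.certificates[0].sha1_fingerprint]
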
# deploy spot termination handler
resource "helm_release" "spot_termination_handler" {
  name       = var.spot_termination_handler_chart_name
  chart      = var.spot_termination_handler_chart_name
  repository = var.spot_termination_handler_chart_repo
  version    = var.spot_termination_handler_chart_version
  namespace  = var.spot_termination_handler_chart_namespace
}
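
# The chart variables above are expected to point at a spot/node termination handler chart;
# as an illustrative assumption (check variables.tf for the real defaults), that would be the
# "aws-node-termination-handler" chart from the https://aws.github.io/eks-charts repository,
# installed into a namespace such as "kube-system".
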
# add spot fleet autoscaling policy
resource "aws_autoscaling_policy" "eks_autoscaling_policy" {
  count = length(local.worker_groups_launch_template)

  name                   = "${module.eks-cluster.workers_asg_names[count.index]}-autoscaling-policy"
  autoscaling_group_name = module.eks-cluster.workers_asg_names[count.index]
  policy_type            = "TargetTrackingScaling"

  target_tracking_configuration {
    predefined_metric_specification {
      predefined_metric_type = "ASGAverageCPUUtilization"
    }
    target_value = var.autoscaling_average_cpu
  }
}