# main.tf
terraform {
  required_version = "~> 1.6.3"

  required_providers {
    aws        = "~> 5.25"
    kubernetes = "~> 2.23.0"
  }
}
provider "kubernetes" {
  host                   = module.eks.cluster_endpoint
  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)

  # Fetch a fresh auth token via the AWS CLI on every run instead of
  # storing a static (and eventually stale) token in state.
  exec {
    api_version = "client.authentication.k8s.io/v1beta1"
    args        = ["eks", "get-token", "--cluster-name", module.eks.cluster_name]
    command     = "aws"
  }
}
data "aws_availability_zones" "available" {}

locals {
  # VPC
  vpc_name        = "terraforming-aws-vpc"
  azs             = slice(data.aws_availability_zones.available.names, 0, 2)
  vpc_cidr        = "10.0.0.0/16"
  partition       = cidrsubnets(local.vpc_cidr, 1, 1)
  private_subnets = cidrsubnets(local.partition[0], 2, 2)
  public_subnets  = cidrsubnets(local.partition[1], 2, 2)
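  # For reference, the cidrsubnets() calls above resolve to:
  #   partition       = ["10.0.0.0/17", "10.0.128.0/17"]
  #   private_subnets = ["10.0.0.0/19", "10.0.32.0/19"]
  #   public_subnets  = ["10.0.128.0/19", "10.0.160.0/19"]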

  # EKS
  cluster_name    = "terraforming-aws-eks"
  cluster_version = "1.28" # latest version at the time of writing
}
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "~> 5.1"
name = local.vpc_name
cidr = local.vpc_cidr
azs = local.azs
private_subnets = local.private_subnets
public_subnets = local.public_subnets
private_subnet_tags = {
"kubernetes.io/role/internal-elb" = 1
}
public_subnet_tags = {
"kubernetes.io/role/elb" = 1
}
# Single NAT Gateway; minimize the costs
# ref. https://registry.terraform.io/modules/terraform-aws-modules/vpc/aws/latest#single-nat-gateway
enable_nat_gateway = true
single_nat_gateway = true
}
module "eks" {
source = "terraform-aws-modules/eks/aws"
version = "~> 19.16"
cluster_name = local.cluster_name
cluster_version = local.cluster_version
cluster_endpoint_public_access = true
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
eks_managed_node_groups = {
default_ng = {
min_size = 1
max_size = 2
desired_size = 2
instance_types = ["t2.small"]
capacity_type = "SPOT"
}
}
}
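
# Not part of the original file: a minimal sketch of outputs you might add so
# scripts can read the cluster coordinates without parsing state directly.
# Both values are existing outputs of the terraform-aws-modules/eks module
# (they are already referenced elsewhere in this configuration).
output "cluster_name" {
  description = "EKS cluster name"
  value       = module.eks.cluster_name
}

output "cluster_endpoint" {
  description = "EKS API server endpoint"
  value       = module.eks.cluster_endpoint
}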
resource "terraform_data" "eks_kubeconfig" {
  provisioner "local-exec" {
    command = <<EOT
aws eks update-kubeconfig \
  --name ${module.eks.cluster_name} \
  --alias ${module.eks.cluster_name} \
  --user-alias ${module.eks.cluster_name}
# Strip the ARN prefix so the context, cluster, and user are addressable by the bare cluster name.
sed -i "s%${trimsuffix(module.eks.cluster_arn, module.eks.cluster_name)}%%" "$HOME/.kube/config"
cat <<EOF
Caveats:
Your kubeconfig has been updated; the context, cluster, and user all share the name ${module.eks.cluster_name}.
Remove them when destroying this resource:
  kubectl ctx -d ${module.eks.cluster_name} # or "kubectl config delete-context ${module.eks.cluster_name}"
  kubectl config delete-cluster ${module.eks.cluster_name}
  kubectl config delete-user ${module.eks.cluster_name}
EOF
EOT
  }
}
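
# A minimal usage sketch, assuming AWS credentials are already configured:
#   terraform init
#   terraform apply
#   kubectl --context terraforming-aws-eks get nodes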
check "eks_status" {
  assert {
    condition     = module.eks.cluster_status == "ACTIVE"
    error_message = "EKS cluster is ${module.eks.cluster_status}"
  }
}
# k2tf -f manifests/mynamespace.yaml
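# The source manifest is not included here; presumably it looks like:
#   apiVersion: v1
#   kind: Namespace
#   metadata:
#     name: mynamespace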
resource "kubernetes_namespace" "mynamespace" {
  metadata {
    name = "mynamespace"
  }
}
resource "kubernetes_pod" "nginx" {
  metadata {
    name = "nginx"
    # Reference the namespace resource (rather than the literal string)
    # so Terraform creates the namespace before the pod.
    namespace = kubernetes_namespace.mynamespace.metadata[0].name
    labels = {
      run = "nginx"
    }
  }

  spec {
    container {
      name  = "orange"
      image = "nginx"
    }
  }
}
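
# To verify the pod after apply (hypothetical session, context name from above):
#   kubectl --context terraforming-aws-eks -n mynamespace get pod nginx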