Create an EKS cluster using Terraform
This post will guide you through all the Terraform code needed to spin up an EKS cluster with Bottlerocket nodes using just the AWS provider instead of a third-party module. The VPC resources need to be set up beforehand. For the VPC setup, I find dedicated subnets for EKS clusters beneficial because IP address prefix delegation needs contiguous blocks of IP addresses. All the referenced Terraform code can be obtained here.
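As a point of reference, here is a minimal sketch of how a prerequisite subnet could be tagged so the data lookups later in this post can find it; the VPC resource name, CIDR, and availability zone are assumptions:

resource "aws_subnet" "eks_private_a" {
  vpc_id            = aws_vpc.main.id   # hypothetical VPC resource
  cidr_block        = "10.0.64.0/18"    # contiguous block reserved for EKS
  availability_zone = "us-east-1a"

  tags = {
    env  = "sandbox"
    type = "eks-private" # matched by the aws_subnets data source in data.tf
  }
}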
Module
Initialize the module where needed. For this module, we're supplying addon data and the type of instances we want in the managed core node group. Setting up the addons this way lets us add further ones in a single place, so nothing has to be duplicated if there are multiple EKS clusters in the same account.
I've also added optional Fargate support; a sketch of the gated profile follows nodes.tf further down.
locals {
  env    = "sandbox"
  region = "us-east-1" # adjust to your region
}

provider "aws" {
  region = local.region
  default_tags {
    tags = {
      env       = local.env
      terraform = true
    }
  }
}

module "eks-cluster" {
  source                = "../../modules/aws/eks"
  cluster_name          = local.env
  env                   = local.env
  region                = local.region
  cluster_version       = "1.28"
  addons                = var.addons
  fargate               = false
  core_node_type        = "t3a.large"
  core_node_count       = 3
  core_node_volume_size = 100
  log_types = [
    "api",
    "audit",
    "authenticator",
    "controllerManager",
    "scheduler"
  ]
}

variable "addons" {
  type = map(any)
  default = {
    vpc = {
      enable  = true
      version = "v1.14.1-eksbuild.1"
    }
    ebs = {
      enable  = true
      version = "v1.24.1-eksbuild.1"
    }
    coredns = {
      enable  = true
      version = "v1.10.1-eksbuild.2"
    }
    kube_proxy = {
      enable  = true
      version = "v1.28.1-eksbuild.1"
    }
  }
}
Module files
The Karpenter discovery tags are only needed so Karpenter can discover the cluster, so feel free to remove them if you're not going to use Karpenter.
cluster.tf
1resource "aws_eks_cluster" "cluster" {
2 name = var.cluster_name
3 version = var.cluster_version
4 role_arn = aws_iam_role.cluster.arn
5
6 vpc_config {
7 subnet_ids = data.aws_subnets.eks_private.ids
8 endpoint_private_access = true
9 endpoint_public_access = false
10 security_group_ids = [aws_security_group.cluster.id]
11 }
12
13 enabled_cluster_log_types = var.log_types
14
15 encryption_config {
16 provider {
17 key_arn = aws_kms_key.eks.arn
18 }
19 resources = ["secrets"]
20 }
21
22 kubernetes_network_config {
23 ip_family = "ipv4"
24 service_ipv4_cidr = "172.20.0.0/16"
25 }
26
27 tags = {
28 "karpenter.sh/discovery" = var.cluster_name
29 }
30
31 depends_on = [
32 aws_iam_role.cluster,
33 aws_kms_key.eks,
34 aws_security_group.cluster
35 ]
36}
You'll need to adjust the lookups here to match your own subnetting and tagging.
data.tf
1data "aws_vpc" "main" {
2 tags = {
3 env = var.env
4 Name = var.env
5 }
6}
7
8data "aws_subnets" "eks_private" {
9 filter {
10 name = "vpc-id"
11 values = [data.aws_vpc.main.id]
12 }
13 tags = {
14 env = var.env
15 type = "eks-private"
16 }
17}
18
19data "aws_ami" "bottlerocket_image" {
20 most_recent = true
21 owners = ["amazon"]
22
23 filter {
24 name = "name"
25 values = ["bottlerocket-aws-k8s-${var.cluster_version}-x86_64-*"]
26 }
27}
28
29data "tls_certificate" "cluster" {
30 url = aws_eks_cluster.cluster.identity[0].oidc[0].issuer
31}
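If you plan to run Graviton instances, swap x86_64 for aarch64 in the AMI name filter above.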
The VPC CNI addon has to be installed before the node groups so that prefix delegation is already enabled when the nodes are created.
addons.tf
1resource "aws_eks_addon" "vpc" {
2 count = var.addons["vpc"]["enable"] ? 1 : 0
3 cluster_name = aws_eks_cluster.cluster.name
4 addon_name = "vpc-cni"
5 addon_version = var.addon_vpc_version
6 resolve_conflicts_on_create = "OVERWRITE"
7 resolve_conflicts_on_update = "OVERWRITE"
8 configuration_values = jsonencode({
9 env = {
10 ENABLE_PREFIX_DELEGATION = "true"
11 }
12 })
13 depends_on = [aws_eks_cluster.cluster]
14}
15
16resource "aws_eks_addon" "ebs" {
17 count = var.addons["ebs"]["enable"] ? 1 : 0
18 cluster_name = aws_eks_cluster.cluster.name
19 addon_name = "aws-ebs-csi-driver"
20 addon_version = var.addon_ebs_version
21 resolve_conflicts_on_create = "OVERWRITE"
22 resolve_conflicts_on_update = "OVERWRITE"
23 depends_on = [
24 aws_eks_cluster.cluster,
25 aws_eks_node_group.core
26 ]
27}
28
29resource "aws_eks_addon" "coredns" {
30 count = var.addons["coredns"]["enable"] ? 1 : 0
31 cluster_name = aws_eks_cluster.cluster.name
32 addon_name = "coredns"
33 addon_version = var.addon_coredns_version
34 resolve_conflicts_on_create = "OVERWRITE"
35 resolve_conflicts_on_update = "OVERWRITE"
36 depends_on = [
37 aws_eks_cluster.cluster,
38 aws_eks_node_group.core
39 ]
40}
41
42resource "aws_eks_addon" "kube-proxy" {
43 count = var.addons["kube_proxy"]["enable"] ? 1 : 0
44 cluster_name = aws_eks_cluster.cluster.name
45 addon_name = "kube-proxy"
46 addon_version = var.addon_kube_proxy_version
47 resolve_conflicts_on_create = "OVERWRITE"
48 resolve_conflicts_on_update = "OVERWRITE"
49 depends_on = [
50 aws_eks_cluster.cluster,
51 aws_eks_node_group.core
52 ]
53}
iam.tf
1resource "aws_iam_role" "cluster" {
2 name = "eks-cluster-${var.cluster_name}"
3 assume_role_policy = jsonencode({
4 Statement : [
5 {
6 Action : "sts:AssumeRole",
7 Effect : "Allow",
8 Principal : {
9 "Service" : "eks.amazonaws.com"
10 }
11 }
12 ],
13 Version : "2012-10-17"
14 })
15
16 managed_policy_arns = [
17 "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy",
18 "arn:aws:iam::aws:policy/AmazonEKSVPCResourceController",
19 "arn:aws:iam::aws:policy/AmazonEKSServicePolicy"
20 ]
21}
22
23resource "aws_iam_role" "nodes" {
24 name = "eks-nodes-${var.cluster_name}"
25 assume_role_policy = jsonencode({
26 Statement : [
27 {
28 Action : "sts:AssumeRole",
29 Effect : "Allow",
30 Principal : {
31 "Service" : "ec2.amazonaws.com"
32 }
33 }
34 ],
35 Version : "2012-10-17"
36 })
37
38 managed_policy_arns = [
39 "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
40 "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
41 "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
42 "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
43 ]
44}
45
46resource "aws_iam_openid_connect_provider" "cluster" {
47 client_id_list = ["sts.amazonaws.com"]
48 thumbprint_list = [data.tls_certificate.cluster.certificates[0].sha1_fingerprint]
49 url = data.tls_certificate.cluster.url
50}
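The OIDC provider above is what enables IAM Roles for Service Accounts (IRSA). As a minimal sketch, an IRSA role for the EBS CSI controller could look like the following; the role and local names are assumptions, while kube-system/ebs-csi-controller-sa is the driver's default service account:

# Hedged sketch: IRSA role for the EBS CSI driver; names are hypothetical.
locals {
  oidc_issuer = trimprefix(aws_eks_cluster.cluster.identity[0].oidc[0].issuer, "https://")
}

resource "aws_iam_role" "ebs_csi" {
  name = "eks-ebs-csi-${var.cluster_name}"
  assume_role_policy = jsonencode({
    Version = "2012-10-17",
    Statement = [
      {
        Effect = "Allow",
        Action = "sts:AssumeRoleWithWebIdentity",
        Principal = {
          Federated = aws_iam_openid_connect_provider.cluster.arn
        },
        Condition = {
          StringEquals = {
            # Scope the role to the driver's default service account
            "${local.oidc_issuer}:sub" = "system:serviceaccount:kube-system:ebs-csi-controller-sa",
            "${local.oidc_issuer}:aud" = "sts.amazonaws.com"
          }
        }
      }
    ]
  })

  managed_policy_arns = [
    "arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy"
  ]
}

If you adopt this, the aws_eks_addon "ebs" resource accepts a service_account_role_arn argument to attach the role to the addon.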
kms.tf
1resource "aws_kms_key" "eks" {
2 description = "Encrypt EKS secrets"
3 enable_key_rotation = true
4 multi_region = true
5}
6
7resource "aws_kms_alias" "eks" {
8 name = "alias/eks-${var.cluster_name}"
9 target_key_id = aws_kms_key.eks.key_id
10}
Here we automatically pull the device mappings required by the Bottlerocket AMI along with the latest AMI ID. The template file path depends on how you structure your modules. This example has only one node group, called "core", and the lifecycle block ignores new AMI releases until you're ready to update.
launch_template.tf
locals {
  device_list = tolist(data.aws_ami.bottlerocket_image.block_device_mappings)
}

resource "aws_launch_template" "core" {
  name                    = "eks-core-${var.cluster_name}"
  disable_api_stop        = false
  disable_api_termination = false
  image_id                = data.aws_ami.bottlerocket_image.id
  instance_type           = var.core_node_type
  user_data = base64encode(templatefile("../../modules/aws/eks/files/node_config.toml.tftpl", {
    cluster_name     = aws_eks_cluster.cluster.name
    cluster_endpoint = aws_eks_cluster.cluster.endpoint
    cluster_ca_data  = aws_eks_cluster.cluster.certificate_authority[0].data
    nodegroup        = "core"
    ami_id           = data.aws_ami.bottlerocket_image.id
  }))

  # Bottlerocket's OS root volume
  block_device_mappings {
    device_name = local.device_list[0]["device_name"]

    ebs {
      delete_on_termination = true
      volume_size           = 5
      volume_type           = "gp3"
      encrypted             = true
    }
  }

  # Bottlerocket's data volume for container images and storage
  block_device_mappings {
    device_name = local.device_list[1]["device_name"]

    ebs {
      delete_on_termination = true
      volume_size           = var.core_node_volume_size
      volume_type           = "gp3"
      encrypted             = true
    }
  }

  metadata_options {
    http_tokens                 = "required"
    http_endpoint               = "enabled"
    http_put_response_hop_limit = 2
    instance_metadata_tags      = "enabled"
  }

  tag_specifications {
    resource_type = "instance"

    tags = {
      Name                 = "eks-core-${var.cluster_name}"
      terraform            = true
      "eks:cluster-name"   = var.env
      "eks:nodegroup-name" = "core"
      platform             = "eks"
      env                  = var.env
    }
  }

  tag_specifications {
    resource_type = "volume"

    tags = {
      Name                 = "eks-core-${var.cluster_name}"
      terraform            = true
      "eks:cluster-name"   = var.env
      "eks:nodegroup-name" = "core"
      platform             = "eks"
      env                  = var.env
    }
  }

  # Comment out when updating nodes to a new AMI
  lifecycle {
    ignore_changes = [
      image_id,
      user_data
    ]
  }
}
nodes.tf
1resource "aws_eks_node_group" "core" {
2 cluster_name = aws_eks_cluster.cluster.name
3 node_group_name = "core"
4 node_role_arn = aws_iam_role.nodes.arn
5 subnet_ids = data.aws_subnets.eks_private.ids
6 ami_type = "CUSTOM"
7 labels = {
8 role = "core"
9 }
10
11 launch_template {
12 name = aws_launch_template.core.name
13 version = aws_launch_template.core.latest_version
14 }
15
16 scaling_config {
17 desired_size = var.core_node_count
18 max_size = var.core_node_count
19 min_size = var.core_node_count
20 }
21
22 update_config {
23 max_unavailable = 1
24 }
25
26 lifecycle {
27 create_before_destroy = true
28 }
29
30 depends_on = [
31 aws_iam_role.nodes,
32 aws_eks_cluster.cluster,
33 aws_launch_template.core,
34 aws_eks_addon.vpc
35 ]
36}
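Since the module exposes a fargate flag, here is the sketch promised earlier of how the gated Fargate profile could look; the profile name, namespace selector, and pod execution role name are assumptions:

# Hedged sketch: only created when var.fargate is true.
resource "aws_iam_role" "fargate" {
  count = var.fargate ? 1 : 0
  name  = "eks-fargate-${var.cluster_name}"
  assume_role_policy = jsonencode({
    Version = "2012-10-17",
    Statement = [
      {
        Action = "sts:AssumeRole",
        Effect = "Allow",
        Principal = {
          Service = "eks-fargate-pods.amazonaws.com"
        }
      }
    ]
  })

  managed_policy_arns = [
    "arn:aws:iam::aws:policy/AmazonEKSFargatePodExecutionRolePolicy"
  ]
}

resource "aws_eks_fargate_profile" "default" {
  count                  = var.fargate ? 1 : 0
  cluster_name           = aws_eks_cluster.cluster.name
  fargate_profile_name   = "default"
  pod_execution_role_arn = aws_iam_role.fargate[0].arn
  subnet_ids             = data.aws_subnets.eks_private.ids

  # Hypothetical selector; pods in this namespace are scheduled onto Fargate
  selector {
    namespace = "fargate"
  }
}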
node_config.toml.tftpl
[settings.kubernetes]
"cluster-name" = "${cluster_name}"
"api-server" = "${cluster_endpoint}"
"cluster-certificate" = "${cluster_ca_data}"
"cluster-dns-ip" = "172.20.0.10"
"max-pods" = 110

[settings.kubernetes.node-labels]
"eks.amazonaws.com/nodegroup-image" = "${ami_id}"
"eks.amazonaws.com/capacityType" = "ON_DEMAND"
"eks.amazonaws.com/nodegroup" = "${nodegroup}"
"role" = "${nodegroup}"
Adjust the security groups to allow access from within the VPC, over a VPN, and so on; an example API-server ingress rule follows the file below.
security_groups.tf
1resource "aws_security_group" "cluster" {
2 name = "eks-cluster-${var.cluster_name}"
3 description = "EKS cluster security"
4 vpc_id = data.aws_vpc.main.id
5 egress {
6 description = "full outbound"
7 cidr_blocks = ["0.0.0.0/0"]
8 from_port = "0"
9 protocol = "-1"
10 self = "false"
11 to_port = "0"
12 }
13 ingress {
14 description = "self reference"
15 from_port = "0"
16 protocol = "-1"
17 self = "true"
18 to_port = "0"
19 }
20 ingress {
21 security_groups = [aws_security_group.node.id]
22 description = "eks node group"
23 from_port = "0"
24 protocol = "-1"
25 self = "false"
26 to_port = "0"
27 }
28 tags = {
29 Name = "eks-cluster-${var.env}"
30 }
31}
32
33resource "aws_security_group" "node" {
34 name = "eks-node-${var.cluster_name}"
35 description = "EKS node security"
36 vpc_id = data.aws_vpc.main.id
37 egress {
38 description = "full outbound"
39 cidr_blocks = ["0.0.0.0/0"]
40 from_port = "0"
41 protocol = "-1"
42 self = "false"
43 to_port = "0"
44 }
45 ingress {
46 description = "self reference"
47 from_port = "0"
48 protocol = "-1"
49 self = "true"
50 to_port = "0"
51 }
52 tags = {
53 Name = "eks-node-${var.env}"
54 "karpenter.sh/discovery" = var.cluster_name
55 }
56}
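For example, to reach the private API server endpoint over a VPN, you could add an ingress rule like this to the cluster security group; the CIDR is an assumption:

ingress {
  description = "kubectl over VPN"
  cidr_blocks = ["10.100.0.0/16"] # hypothetical VPN CIDR
  from_port   = 443
  protocol    = "tcp"
  self        = false
  to_port     = 443
}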
variables.tf
1variable "addons" {
2 type = map(any)
3}
4variable "cluster_name" {
5 type = string
6}
7variable "cluster_version" {
8 type = string
9}
10variable "core_node_count" {
11 type = number
12}
13variable "core_node_type" {
14 type = string
15}
16variable "core_node_volume_size" {
17 type = number
18}
19variable "env" {
20 type = string
21}
22variable "fargate" {
23 type = bool
24}
25variable "log_types" {
26 type = list(string)
27}
28variable "region" {
29 type = string
30}