Set Up the AWS Load Balancer Controller on EKS

This post will guide you through installing and using the AWS Load Balancer Controller on EKS with Terraform, along with an example ingress manifest. I won’t go into detail on how to set up the VPC and EKS cluster; that’s covered in my previous posts. All the referenced Terraform code can be obtained here.

Providers/Versions

providers.tf

locals {
  env    = "sandbox"
  region = "us-east-1"
}

provider "aws" {
  region = local.region
  default_tags {
    tags = {
      env       = local.env
      terraform = true
    }
  }
}

provider "helm" {
  kubernetes {
    host                   = module.eks-cluster.endpoint
    cluster_ca_certificate = base64decode(module.eks-cluster.certificate)
    exec {
      api_version = "client.authentication.k8s.io/v1beta1"
      # This requires the awscli to be installed locally where Terraform is executed
      args        = ["eks", "get-token", "--cluster-name", module.eks-cluster.name]
      command     = "aws"
    }
  }
}

provider "kubernetes" {
  host                   = module.eks-cluster.endpoint
  cluster_ca_certificate = base64decode(module.eks-cluster.certificate)
  exec {
    api_version = "client.authentication.k8s.io/v1beta1"
    command     = "aws"
    # This requires the awscli to be installed locally where Terraform is executed
    args = ["eks", "get-token", "--cluster-name", module.eks-cluster.name]
  }
}
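
The provider blocks above assume the EKS module exposes the cluster endpoint, CA certificate, and name as outputs. A minimal sketch of what those outputs might look like (the resource and output names are assumptions; your module may differ):

output "endpoint" {
  value = aws_eks_cluster.cluster.endpoint
}

output "certificate" {
  # Base64-encoded CA data, matching the base64decode() calls above
  value = aws_eks_cluster.cluster.certificate_authority[0].data
}

output "name" {
  value = aws_eks_cluster.cluster.name
}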

versions.tf

terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.0"
    }
    kubectl = {
      source  = "alekc/kubectl"
      version = "~> 2.0.3"
    }
    helm = {
      source  = "hashicorp/helm"
      version = "~> 2.11.0"
    }
  }
  required_version = "~> 1.5.7"
}
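
versions.tf also pins the alekc/kubectl provider, which is used further down for the kubectl_manifest resources. Its configuration isn’t shown above; a sketch, assuming the same EKS module outputs as the other providers:

provider "kubectl" {
  host                   = module.eks-cluster.endpoint
  cluster_ca_certificate = base64decode(module.eks-cluster.certificate)
  load_config_file       = false
  exec {
    api_version = "client.authentication.k8s.io/v1beta1"
    command     = "aws"
    # This requires the awscli to be installed locally where Terraform is executed
    args = ["eks", "get-token", "--cluster-name", module.eks-cluster.name]
  }
}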

Module

Initialize the module where needed. The “count” here lets the add-on be toggled on or off through the EKS module’s variables; it can be removed if you don’t need that.

module "lb-controller" {
  count                  = var.addons["lb_controller"]["enable"] ? 1 : 0
  source                 = "../../aws/eks-addons/lb-controller"
  cluster_name           = var.env
  env                    = var.env
  irsa_oidc_provider_arn = aws_iam_openid_connect_provider.cluster.arn
  controller_version     = var.addons["lb_controller"]["version"]
  depends_on = [
    aws_eks_node_group.core
  ]
}
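
Note that the module also declares a “cert” input below (the ACM certificate ARN used by the shared LB manifests at the end), so that needs to be passed here as well. The “count” toggle assumes an “addons” variable roughly along these lines; a sketch, not the exact structure from my repo:

variable "addons" {
  type = map(object({
    enable  = bool
    version = string
  }))
  default = {
    lb_controller = {
      enable  = true
      version = "1.6.2" # Helm chart version; an assumption, pin whatever you've tested
    }
  }
}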

Module files

Here I’m targeting my “core” node group, so your affinity rule may need to change.

main.tf

resource "helm_release" "lb_controller" {
  namespace        = "kube-system"
  create_namespace = false
  name             = "aws-load-balancer-controller"
  repository       = "https://aws.github.io/eks-charts"
  chart            = "aws-load-balancer-controller"
  version          = var.controller_version

  values = [
    <<-EOT
    clusterName: ${var.cluster_name}
    serviceAccount:
      create: false
      name: aws-load-balancer-controller
    affinity:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
          - matchExpressions:
            - key: role
              operator: In
              values:
              - core
    EOT
  ]

  depends_on = [kubernetes_service_account.service_account]
}

resource "kubernetes_service_account" "service_account" {
  metadata {
    name      = "aws-load-balancer-controller"
    namespace = "kube-system"
    labels = {
      "app.kubernetes.io/name"      = "aws-load-balancer-controller"
      "app.kubernetes.io/component" = "controller"
    }
    annotations = {
      "eks.amazonaws.com/role-arn"               = aws_iam_role.lb.arn
      "eks.amazonaws.com/sts-regional-endpoints" = "true"
    }
  }
}
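
The nodeAffinity above matches nodes carrying a role=core label. That label comes from the node group itself; a sketch of how it might be set, assuming an aws_eks_node_group similar to the one from my cluster posts (the role ARN and subnet references are placeholders):

resource "aws_eks_node_group" "core" {
  cluster_name    = var.cluster_name
  node_group_name = "core"
  node_role_arn   = aws_iam_role.node.arn  # placeholder: your node IAM role
  subnet_ids      = var.private_subnet_ids # placeholder: your private subnets
  labels = {
    role = "core" # matched by the controller's affinity rule above
  }
  scaling_config {
    desired_size = 2
    min_size     = 2
    max_size     = 3
  }
}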

The IAM policy referenced here is a long one and can be obtained here.

iam.tf

locals {
  irsa_oidc_provider_url = replace(var.irsa_oidc_provider_arn, "/^(.*provider/)/", "")
  account_id             = data.aws_caller_identity.current.account_id
}

data "aws_iam_policy_document" "irsa_assume_role" {
  statement {
    effect  = "Allow"
    actions = ["sts:AssumeRoleWithWebIdentity"]

    principals {
      type        = "Federated"
      identifiers = [var.irsa_oidc_provider_arn]
    }
    condition {
      test     = "StringEquals"
      variable = "${local.irsa_oidc_provider_url}:sub"
      values   = ["system:serviceaccount:kube-system:aws-load-balancer-controller"]
    }
    condition {
      test     = "StringEquals"
      variable = "${local.irsa_oidc_provider_url}:aud"
      values   = ["sts.amazonaws.com"]
    }
  }
}

resource "aws_iam_role" "lb" {
  name               = "eks-lb-controller-${var.env}"
  assume_role_policy = data.aws_iam_policy_document.irsa_assume_role.json
  inline_policy {
    name   = "eks-lb-controller"
    policy = file("../../modules/aws/eks-addons/lb-controller/files/iam_policy.json")
  }
}

data.tf

data "aws_caller_identity" "current" {}

variables.tf

variable "cluster_name" {
  type = string
}
variable "env" {
  type = string
}
variable "irsa_oidc_provider_arn" {
  type = string
}
variable "controller_version" {
  type = string
}
variable "cert" {
  type = string
}

Demo

There are several annotations in this example, and more in the docs, for setting up the load balancer to your specifications. A few to highlight are “group.name”, which joins a shared load balancer or creates one if it doesn’t exist; the certificate ARN if using SSL; and the scheme, which in this case creates a public load balancer. This will also create the target group for the app, and the rules section will create the listener rules on the load balancer.

ingress.yaml

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: flask-app
  namespace: sandbox
  labels:
    helm.sh/chart: flask-app-0.1.0
    app.kubernetes.io/version: "1.0.0"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: flask-app
    app.kubernetes.io/instance: flask-app
  annotations:
    alb.ingress.kubernetes.io/backend-protocol: HTTP
    alb.ingress.kubernetes.io/group.name: sandbox-public
    alb.ingress.kubernetes.io/healthcheck-interval-seconds: "30"
    alb.ingress.kubernetes.io/healthcheck-path: /health
    alb.ingress.kubernetes.io/healthcheck-port: "8000"
    alb.ingress.kubernetes.io/healthcheck-protocol: HTTP
    alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS":443}]'
    alb.ingress.kubernetes.io/load-balancer-name: sandbox-public
    alb.ingress.kubernetes.io/scheme: internet-facing
    alb.ingress.kubernetes.io/ssl-policy: ELBSecurityPolicy-FS-1-2-2019-08
    alb.ingress.kubernetes.io/tags: environment=sandbox
    alb.ingress.kubernetes.io/target-type: ip
    alb.ingress.kubernetes.io/certificate-arn: <certificate_arn>
    alb.ingress.kubernetes.io/ssl-redirect: '443'
spec:
  ingressClassName: alb
  rules:
    - host: app.sandbox.test.site # FQDN
      http:
        paths:
        - path: "/"
          pathType: Prefix
          backend:
            service:
              name: flask-app
              port:
                number: 8000
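
The ingress assumes a flask-app Service exposing port 8000 in the sandbox namespace; with target-type ip, the controller registers the pod IPs behind that Service directly in the target group. A minimal sketch of that Service, with names taken from the manifest above:

apiVersion: v1
kind: Service
metadata:
  name: flask-app
  namespace: sandbox
spec:
  type: ClusterIP
  selector:
    app.kubernetes.io/name: flask-app # assumption: match your pod labels
  ports:
    - name: http
      port: 8000
      targetPort: 8000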

When you create your first ingress resource on your K8s cluster, the controller will spin up an LB if one matching the group name doesn’t already exist. I find doing this with your first app causes two issues: you can’t automatically create a DNS record for your app through Terraform since the LB doesn’t exist yet, and if you remove the app, the LB is removed as well if nothing else is using it. That might be fine for a throwaway environment, but I like to create my LBs right off the bat and tie them to a non-connected app in the kube-system namespace for protection.
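
For reference, once a shared LB exists, the Terraform side of that DNS record looks something like this; a sketch, assuming a Route 53 hosted zone for the sandbox domain and the sandbox-public LB name from the demo ingress:

data "aws_route53_zone" "sandbox" {
  name = "sandbox.test.site" # assumption: your hosted zone
}

data "aws_lb" "public" {
  name = "sandbox-public" # the load-balancer-name set by the ingress annotations
}

resource "aws_route53_record" "flask_app" {
  zone_id = data.aws_route53_zone.sandbox.zone_id
  name    = "app.sandbox.test.site"
  type    = "A"

  alias {
    name                   = data.aws_lb.public.dns_name
    zone_id                = data.aws_lb.public.zone_id
    evaluate_target_health = false
  }
}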

I’ve found creating the LB through the AWS provider first to be problematic, since the controller is finicky about working with an existing LB, so I’m using the controller itself to create an external and an internal LB.

resource "kubectl_manifest" "external_app_lb" {
  yaml_body = templatefile("../../modules/aws/eks-addons/lb_controller/files/app_lb.yaml", {
    CERT       = var.cert
    ENV        = var.env
    LB_NAME    = "${var.env}-app-external"
    LB_TYPE    = "internet-facing"
    SSL_POLICY = "ELBSecurityPolicy-FS-1-2-2019-08"
  })
}

resource "kubectl_manifest" "internal_app_lb" {
  yaml_body = templatefile("../../modules/aws/eks-addons/lb_controller/files/app_lb.yaml", {
    CERT       = var.cert
    ENV        = var.env
    LB_NAME    = "${var.env}-app-internal"
    LB_TYPE    = "internal"
    SSL_POLICY = "ELBSecurityPolicy-FS-1-2-2019-08"
  })
}

app_lb.yaml

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: "${LB_NAME}"
  namespace: kube-system
  annotations:
    alb.ingress.kubernetes.io/actions.default: |
      {
        "Type": "fixed-response",
        "FixedResponseConfig": {
          "ContentType": "text/plain",
          "StatusCode": "404",
          "MessageBody": ""
        }
      }
    alb.ingress.kubernetes.io/certificate-arn: ${CERT}
    alb.ingress.kubernetes.io/group.name: ${LB_NAME}
    alb.ingress.kubernetes.io/group.order: "1000"
    alb.ingress.kubernetes.io/listen-ports: '[{"HTTP":80}, {"HTTPS":443}]'
    alb.ingress.kubernetes.io/load-balancer-name: ${LB_NAME}
    alb.ingress.kubernetes.io/scheme: ${LB_TYPE}
    alb.ingress.kubernetes.io/tags: "env=${ENV}"
    alb.ingress.kubernetes.io/ssl-policy: ${SSL_POLICY}
    alb.ingress.kubernetes.io/actions.ssl-redirect: |
      {
        "Type": "redirect",
        "RedirectConfig": {
          "Protocol": "HTTPS",
          "Port": "443",
          "StatusCode": "HTTP_301"
        }
      }
    kubernetes.io/ingress.class: alb
spec:
  rules:
    - http:
        paths:
          - pathType: Prefix
            path: /
            backend:
              service:
                name: default
                port:
                  name: use-annotation