code-server/terraform/modules/eks/main.tf
Add comprehensive Terraform infrastructure for code-server deployment on AWS

This commit adds complete Terraform infrastructure as code for deploying
code-server on both EC2 and EKS platforms with enterprise-grade security
and SAML/OIDC authentication.

Features:
- EC2 deployment with Auto Scaling Groups and Application Load Balancer
- EKS deployment with managed node groups and AWS Load Balancer Controller
- Private network setup with VPC, private subnets, and NAT gateways
- SAML/OIDC authentication using OAuth2 Proxy
- Security hardening:
  - KMS encryption for data at rest
  - TLS encryption in transit
  - IAM roles with least privilege
  - Security groups with minimal access
  - VPC Flow Logs
  - IMDSv2 enforcement
- Auto-scaling capabilities for both EC2 and EKS
- CloudWatch logging and monitoring
- Automated deployment scripts

Terraform Modules:
- modules/vpc: VPC with public/private subnets, NAT, and VPC endpoints
- modules/security: Security groups, IAM roles, and KMS keys
- modules/ec2: EC2 Auto Scaling deployment with ALB
- modules/eks: EKS cluster with managed node groups and addons

Deployments:
- deployments/ec2: EC2 deployment configuration
- deployments/eks: EKS deployment configuration with Kubernetes manifests

Documentation:
- README.md: Comprehensive deployment and operations guide
- QUICK-START.md: Quick reference for fast deployment
- SAML-SETUP-GUIDE.md: Step-by-step IdP configuration guide

Scripts:
- scripts/deploy-ec2.sh: Automated EC2 deployment
- scripts/deploy-eks.sh: Automated EKS deployment
- scripts/destroy-ec2.sh: EC2 cleanup
- scripts/destroy-eks.sh: EKS cleanup


# EKS Module for Code-Server Deployment
# Creates an EKS cluster with managed node groups in private subnets

# EKS Cluster
resource "aws_eks_cluster" "main" {
name = var.cluster_name
role_arn = var.cluster_role_arn
version = var.kubernetes_version
vpc_config {
subnet_ids = concat(var.private_subnet_ids, var.public_subnet_ids)
endpoint_private_access = true
endpoint_public_access = var.endpoint_public_access
public_access_cidrs = var.endpoint_public_access ? var.public_access_cidrs : []
security_group_ids = [var.cluster_security_group_id]
}
encryption_config {
provider {
key_arn = var.kms_key_arn
}
resources = ["secrets"]
}
enabled_cluster_log_types = var.cluster_log_types
tags = merge(
var.tags,
{
Name = var.cluster_name
}
)
depends_on = [
var.cluster_role_arn
]
}
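
# var.cluster_log_types typically includes some of ["api", "audit",
# "authenticator", "controllerManager", "scheduler"]; entries are delivered to
# the CloudWatch log group declared at the bottom of this file.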
# EKS Cluster Addons
resource "aws_eks_addon" "vpc_cni" {
  cluster_name                = aws_eks_cluster.main.name
  addon_name                  = "vpc-cni"
  addon_version               = var.vpc_cni_version
  resolve_conflicts_on_create = "OVERWRITE"
  resolve_conflicts_on_update = "PRESERVE"
  tags                        = var.tags
}

resource "aws_eks_addon" "kube_proxy" {
  cluster_name                = aws_eks_cluster.main.name
  addon_name                  = "kube-proxy"
  addon_version               = var.kube_proxy_version
  resolve_conflicts_on_create = "OVERWRITE"
  resolve_conflicts_on_update = "PRESERVE"
  tags                        = var.tags
}
resource "aws_eks_addon" "coredns" {
cluster_name = aws_eks_cluster.main.name
addon_name = "coredns"
addon_version = var.coredns_version
resolve_conflicts_on_create = "OVERWRITE"
resolve_conflicts_on_update = "PRESERVE"
tags = var.tags
depends_on = [
aws_eks_node_group.main
]
}
resource "aws_eks_addon" "ebs_csi_driver" {
count = var.enable_ebs_csi_driver ? 1 : 0
cluster_name = aws_eks_cluster.main.name
addon_name = "aws-ebs-csi-driver"
addon_version = var.ebs_csi_driver_version
resolve_conflicts_on_create = "OVERWRITE"
resolve_conflicts_on_update = "PRESERVE"
service_account_role_arn = aws_iam_role.ebs_csi_driver[0].arn
tags = var.tags
}
# IAM Role for EBS CSI Driver (assumed via IRSA, so enabling the driver
# requires enable_irsa = true so the OIDC provider below exists)
resource "aws_iam_role" "ebs_csi_driver" {
  count = var.enable_ebs_csi_driver ? 1 : 0
  name  = "${var.cluster_name}-ebs-csi-driver-role"

  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Effect = "Allow"
        Principal = {
          Federated = aws_iam_openid_connect_provider.eks[0].arn
        }
        Action = "sts:AssumeRoleWithWebIdentity"
        Condition = {
          StringEquals = {
            "${replace(aws_iam_openid_connect_provider.eks[0].url, "https://", "")}:sub" = "system:serviceaccount:kube-system:ebs-csi-controller-sa"
            "${replace(aws_iam_openid_connect_provider.eks[0].url, "https://", "")}:aud" = "sts.amazonaws.com"
          }
        }
      }
    ]
  })

  tags = var.tags
}
resource "aws_iam_role_policy_attachment" "ebs_csi_driver" {
count = var.enable_ebs_csi_driver ? 1 : 0
role = aws_iam_role.ebs_csi_driver[0].name
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy"
}
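
# Illustrative only, not part of this module: once the EBS CSI addon is active,
# workloads consume it through a StorageClass. A minimal sketch, assuming the
# hashicorp/kubernetes provider is configured against this cluster:
#
#   resource "kubernetes_storage_class_v1" "gp3" {
#     metadata {
#       name = "gp3"
#     }
#     storage_provisioner = "ebs.csi.aws.com"
#     volume_binding_mode = "WaitForFirstConsumer"
#     parameters = {
#       type      = "gp3"
#       encrypted = "true"
#     }
#   }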
# OIDC Provider for EKS (enables IRSA)
data "tls_certificate" "eks" {
  count = var.enable_irsa ? 1 : 0
  url   = aws_eks_cluster.main.identity[0].oidc[0].issuer
}

resource "aws_iam_openid_connect_provider" "eks" {
  count           = var.enable_irsa ? 1 : 0
  client_id_list  = ["sts.amazonaws.com"]
  thumbprint_list = [data.tls_certificate.eks[0].certificates[0].sha1_fingerprint]
  url             = aws_eks_cluster.main.identity[0].oidc[0].issuer
  tags            = var.tags
}
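
# With IRSA in place, a pod assumes an IAM role whenever its service account
# carries the matching annotation, e.g. (illustrative ARN):
#   eks.amazonaws.com/role-arn: arn:aws:iam::<account-id>:role/<role-name>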
# EKS Node Group
resource "aws_eks_node_group" "main" {
  cluster_name    = aws_eks_cluster.main.name
  node_group_name = "${var.cluster_name}-node-group"
  node_role_arn   = var.node_role_arn
  subnet_ids      = var.private_subnet_ids
  version         = var.kubernetes_version

  scaling_config {
    desired_size = var.desired_nodes
    max_size     = var.max_nodes
    min_size     = var.min_nodes
  }

  update_config {
    max_unavailable = 1
  }

  instance_types = var.node_instance_types
  capacity_type  = var.capacity_type
  disk_size      = var.node_disk_size
  labels         = var.node_labels

  dynamic "taint" {
    for_each = var.node_taints
    content {
      key    = taint.value.key
      value  = taint.value.value
      effect = taint.value.effect
    }
  }

  tags = merge(
    var.tags,
    {
      Name = "${var.cluster_name}-node-group"
    }
  )

  lifecycle {
    create_before_destroy = true
    # Let an autoscaler manage desired capacity without Terraform reverting it
    # on the next apply.
    ignore_changes = [scaling_config[0].desired_size]
  }

  # The dependency on the node IAM role is already implicit through
  # node_role_arn, so no explicit depends_on is needed.
}
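
# The taint objects consumed by the dynamic block above are assumed to be
# declared in variables.tf roughly as:
#   node_taints = [
#     { key = "dedicated", value = "code-server", effect = "NO_SCHEDULE" }
#   ]
# where effect is one of NO_SCHEDULE, NO_EXECUTE, or PREFER_NO_SCHEDULE.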
# CloudWatch Log Group for EKS
resource "aws_cloudwatch_log_group" "eks_cluster" {
  name              = "/aws/eks/${var.cluster_name}/cluster"
  retention_in_days = var.log_retention_days
  tags              = var.tags
}
# IAM Role for AWS Load Balancer Controller (assumed via IRSA, so enabling the
# controller requires enable_irsa = true; a trust policy cannot carry a null
# federated principal)
resource "aws_iam_role" "aws_load_balancer_controller" {
  count = var.enable_aws_load_balancer_controller ? 1 : 0
  name  = "${var.cluster_name}-aws-load-balancer-controller"

  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Effect = "Allow"
        Principal = {
          Federated = aws_iam_openid_connect_provider.eks[0].arn
        }
        Action = "sts:AssumeRoleWithWebIdentity"
        Condition = {
          StringEquals = {
            "${replace(aws_iam_openid_connect_provider.eks[0].url, "https://", "")}:sub" = "system:serviceaccount:kube-system:aws-load-balancer-controller"
            "${replace(aws_iam_openid_connect_provider.eks[0].url, "https://", "")}:aud" = "sts.amazonaws.com"
          }
        }
      }
    ]
  })

  tags = var.tags
}
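
# The controller itself is installed separately (typically the
# aws-load-balancer-controller Helm chart in kube-system); for IRSA to take
# effect, its service account must be annotated with this role's ARN.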
resource "aws_iam_policy" "aws_load_balancer_controller" {
count = var.enable_aws_load_balancer_controller ? 1 : 0
name = "${var.cluster_name}-AWSLoadBalancerControllerIAMPolicy"
description = "IAM policy for AWS Load Balancer Controller"
policy = file("${path.module}/iam-policy-aws-load-balancer-controller.json")
tags = var.tags
}
resource "aws_iam_role_policy_attachment" "aws_load_balancer_controller" {
count = var.enable_aws_load_balancer_controller ? 1 : 0
role = aws_iam_role.aws_load_balancer_controller[0].name
policy_arn = aws_iam_policy.aws_load_balancer_controller[0].arn
}
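
# Example invocation from a deployment (illustrative; the module path and the
# module.vpc / module.security output names are assumptions, while the inputs
# match the variables referenced above):
#
#   module "eks" {
#     source = "../../modules/eks"
#
#     cluster_name              = "code-server"
#     kubernetes_version        = "1.29"
#     cluster_role_arn          = module.security.eks_cluster_role_arn
#     node_role_arn             = module.security.eks_node_role_arn
#     cluster_security_group_id = module.security.eks_cluster_security_group_id
#     kms_key_arn               = module.security.kms_key_arn
#     private_subnet_ids        = module.vpc.private_subnet_ids
#     public_subnet_ids         = module.vpc.public_subnet_ids
#
#     endpoint_public_access              = false
#     enable_irsa                         = true
#     enable_ebs_csi_driver               = true
#     enable_aws_load_balancer_controller = true
#
#     node_instance_types = ["t3.large"]
#     min_nodes           = 1
#     desired_nodes       = 2
#     max_nodes           = 4
#
#     tags = { Project = "code-server" }
#   }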