Add comprehensive Terraform infrastructure for code-server deployment on AWS
This commit adds complete Terraform infrastructure as code for deploying code-server on both EC2 and EKS platforms with enterprise-grade security and SAML/OIDC authentication.

Features:
- EC2 deployment with Auto Scaling Groups and Application Load Balancer
- EKS deployment with managed node groups and AWS Load Balancer Controller
- Private network setup with VPC, private subnets, and NAT gateways
- SAML/OIDC authentication using OAuth2 Proxy
- Security hardening:
  - KMS encryption for data at rest
  - TLS encryption in transit
  - IAM roles with least privilege
  - Security groups with minimal access
  - VPC Flow Logs
  - IMDSv2 enforcement
- Auto-scaling capabilities for both EC2 and EKS
- CloudWatch logging and monitoring
- Automated deployment scripts

Terraform Modules:
- modules/vpc: VPC with public/private subnets, NAT, and VPC endpoints
- modules/security: Security groups, IAM roles, and KMS keys
- modules/ec2: EC2 Auto Scaling deployment with ALB
- modules/eks: EKS cluster with managed node groups and addons

Deployments:
- deployments/ec2: EC2 deployment configuration
- deployments/eks: EKS deployment configuration with Kubernetes manifests

Documentation:
- README.md: Comprehensive deployment and operations guide
- QUICK-START.md: Quick reference for fast deployment
- SAML-SETUP-GUIDE.md: Step-by-step IdP configuration guide

Scripts:
- scripts/deploy-ec2.sh: Automated EC2 deployment
- scripts/deploy-eks.sh: Automated EKS deployment
- scripts/destroy-ec2.sh: EC2 cleanup
- scripts/destroy-eks.sh: EKS cleanup
parent 11e6e656c0
commit b8094ac6a0

31 changed files with 5453 additions and 0 deletions
233  terraform/QUICK-START.md  Normal file

@@ -0,0 +1,233 @@
# Code-Server AWS Deployment - Quick Start

This is a condensed guide to get code-server running on AWS quickly. For detailed documentation, see [README.md](README.md).

## Prerequisites

- AWS account with credentials configured
- Terraform >= 1.0
- AWS CLI
- kubectl and Helm (for EKS deployment)
- SAML/OIDC provider configured (Okta, Azure AD, Google, etc.)

## 5-Minute EC2 Setup

### 1. Configure Variables

```bash
cd deployments/ec2
cp terraform.tfvars.example terraform.tfvars
```

Edit `terraform.tfvars` with the minimum required values:

```hcl
aws_region = "us-east-1"

# OAuth2/SAML Configuration
oauth2_client_id     = "your-client-id"
oauth2_client_secret = "your-client-secret"
oauth2_issuer_url    = "https://your-idp.com/.well-known/openid-configuration"
oauth2_redirect_url  = "https://code-server.example.com/oauth2/callback"

# Generate with: python -c 'import os,base64; print(base64.urlsafe_b64encode(os.urandom(32)).decode())'
oauth2_cookie_secret = "<GENERATED_SECRET>"
```

### 2. Deploy

```bash
../../scripts/deploy-ec2.sh
```

### 3. Get Access URL

```bash
terraform output alb_url
```

### 4. Get Password

```bash
aws secretsmanager get-secret-value \
  --secret-id $(terraform output -raw code_server_password_secret_arn) \
  --query SecretString --output text
```

## 10-Minute EKS Setup

### 1. Configure Variables

```bash
cd deployments/eks
cp terraform.tfvars.example terraform.tfvars
```

Edit `terraform.tfvars`:

```hcl
aws_region = "us-east-1"

# OAuth2/SAML Configuration
oauth2_client_id     = "your-client-id"
oauth2_client_secret = "your-client-secret"
oauth2_cookie_secret = "<GENERATED_SECRET>" # see "Generate Cookie Secret" below
```

Edit `k8s/code-server-values.yaml`:

```yaml
ingress:
  hosts:
    - host: code-server.example.com
```

Edit `k8s/oauth2-proxy.yaml`:

```yaml
data:
  oauth2_proxy.cfg: |
    redirect_url    = "https://code-server.example.com/oauth2/callback"
    oidc_issuer_url = "https://your-idp.com"
```

### 2. Deploy

```bash
../../scripts/deploy-eks.sh
```

### 3. Get Load Balancer URL

```bash
kubectl get ingress -n code-server
```

## Common Commands

### EC2

```bash
# View logs
aws logs tail /aws/ec2/code-server-dev-code-server --follow

# Scale instances
terraform apply -var="desired_instances=3"

# Destroy
../../scripts/destroy-ec2.sh
```

### EKS

```bash
# View pods
kubectl get pods -n code-server

# View logs
kubectl logs -n code-server -l app.kubernetes.io/name=code-server -f

# Scale pods
kubectl scale deployment code-server -n code-server --replicas=3

# Destroy
../../scripts/destroy-eks.sh
```

## Generate Cookie Secret

```bash
python -c 'import os,base64; print(base64.urlsafe_b64encode(os.urandom(32)).decode())'
```

Or:

```bash
openssl rand -base64 32
```
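
Note: OAuth2 Proxy expects a URL-safe base64 value. The Python one-liner above is already URL-safe; if the plain `openssl` output contains `+` or `/`, translating those characters keeps the secret URL-safe (a variant suggested in the OAuth2 Proxy docs):

```bash
openssl rand -base64 32 | tr -- '+/' '-_'
```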

## SAML/OIDC Provider Quick Links

### Okta

```hcl
oauth2_issuer_url = "https://<tenant>.okta.com/.well-known/openid-configuration"
```

### Azure AD

```hcl
oauth2_issuer_url = "https://login.microsoftonline.com/<tenant-id>/v2.0/.well-known/openid-configuration"
```

### Google

```hcl
oauth2_issuer_url = "https://accounts.google.com/.well-known/openid-configuration"
```

## Troubleshooting

### Can't access code-server

1. Check that the security group allows your IP
2. Verify the ALB is healthy: `aws elbv2 describe-target-health --target-group-arn <arn>`
3. Check the logs for errors

### Authentication fails

1. Verify the redirect URL matches the IdP configuration exactly
2. Check that the client ID and secret are correct
3. View the OAuth2 Proxy logs for detailed error messages

### Pods not starting (EKS)

1. Check events: `kubectl get events -n code-server --sort-by='.lastTimestamp'`
2. Check pod status: `kubectl describe pod <pod> -n code-server`
3. Verify nodes have capacity: `kubectl top nodes`

## Cost Estimate

### EC2 (t3.medium, 1 instance)

- EC2: ~$30/month
- ALB: ~$20/month
- NAT Gateway: ~$32/month
- EBS: ~$5/month
- **Total: ~$87/month**

### EKS (t3.medium, 2 nodes)

- EKS Control Plane: ~$73/month
- EC2 Nodes: ~$60/month
- ALB: ~$20/month
- NAT Gateway: ~$32/month
- EBS: ~$10/month
- **Total: ~$195/month**

### Cost Optimization

- Use a single NAT gateway: save ~$32-64/month
- Use Spot instances (EKS): save up to 90% on compute
- Scale to zero during off-hours: save on compute costs (see the sketch below)
- Use gp3 instead of gp2 volumes: save ~20% on storage
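
One way to implement the off-hours item above is with Auto Scaling scheduled actions. A sketch using the AWS CLI; the ASG name and the weekday 08:00-20:00 UTC schedule are assumptions, so substitute the value from `terraform output autoscaling_group_name`:

```bash
# Placeholder ASG name; take the real one from `terraform output autoscaling_group_name`.
ASG_NAME="code-server-dev-asg"

# Scale to zero every weekday evening (20:00 UTC)...
aws autoscaling put-scheduled-update-group-action \
  --auto-scaling-group-name "$ASG_NAME" \
  --scheduled-action-name scale-down-evening \
  --recurrence "0 20 * * 1-5" \
  --min-size 0 --max-size 3 --desired-capacity 0

# ...and back up each weekday morning (08:00 UTC).
aws autoscaling put-scheduled-update-group-action \
  --auto-scaling-group-name "$ASG_NAME" \
  --scheduled-action-name scale-up-morning \
  --recurrence "0 8 * * 1-5" \
  --min-size 1 --max-size 3 --desired-capacity 1
```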

## Next Steps

1. Configure DNS (CNAME to the ALB DNS name)
2. Set up an ACM certificate for HTTPS
3. Configure auto-scaling policies
4. Set up CloudWatch alarms
5. Review and adjust security groups
6. Configure backup/snapshot policies

## Full Documentation

- [Complete README](README.md) - Detailed deployment guide
- [SAML Setup Guide](SAML-SETUP-GUIDE.md) - IdP configuration
- [Code-Server Docs](https://coder.com/docs/code-server) - Code-server features

## Support

For issues:

1. Check [Troubleshooting](README.md#troubleshooting) in the README
2. Review the CloudWatch logs
3. Check the AWS service health dashboard
758  terraform/README.md  Normal file

@@ -0,0 +1,758 @@
# Code-Server AWS Deployment with Terraform

This repository contains Terraform code to deploy [code-server](https://github.com/coder/code-server) on AWS using either **EC2** or **EKS**, with private networking, security hardening, and SAML authentication.

## Table of Contents

- [Overview](#overview)
- [Architecture](#architecture)
- [Features](#features)
- [Prerequisites](#prerequisites)
- [Quick Start](#quick-start)
- [EC2 Deployment](#ec2-deployment)
- [EKS Deployment](#eks-deployment)
- [Configuration](#configuration)
- [SAML/OIDC Authentication](#samloidc-authentication)
- [Security Features](#security-features)
- [Deployment Procedures](#deployment-procedures)
- [Rollout and Updates](#rollout-and-updates)
- [Monitoring and Logging](#monitoring-and-logging)
- [Troubleshooting](#troubleshooting)
- [Cost Optimization](#cost-optimization)
- [Cleanup](#cleanup)

## Overview

This Terraform configuration provides two deployment options for code-server:

1. **EC2 Deployment**: Code-server running on Auto Scaling EC2 instances behind an Application Load Balancer
2. **EKS Deployment**: Code-server running on Amazon EKS (Kubernetes) with Helm charts

Both deployments include:

- Private networking with VPC, subnets, and NAT gateways
- SAML/OIDC authentication via OAuth2 Proxy
- HTTPS support with ACM certificates
- Encryption at rest using AWS KMS
- Auto-scaling capabilities
- CloudWatch logging and monitoring
- Security hardening following AWS best practices

## Architecture

### EC2 Architecture

```
Internet → ALB (HTTPS) → OAuth2 Proxy → Code-Server (EC2 Auto Scaling)
               ↓                               ↓
        Private Subnets                 Private Subnets
               ↓                               ↓
          NAT Gateway                    VPC Endpoints
               ↓
              IGW
```

### EKS Architecture

```
Internet → ALB Ingress → OAuth2 Proxy Pod → Code-Server Pods
               ↓                   ↓
          EKS Cluster          EKS Nodes
               ↓                   ↓
        Private Subnets      Private Subnets
               ↓                   ↓
          NAT Gateway        VPC Endpoints
               ↓
              IGW
```

## Features

- **Private Network Setup**: All compute resources in private subnets
- **SAML/OIDC Authentication**: OAuth2 Proxy for enterprise SSO integration
- **High Availability**: Multi-AZ deployment with auto-scaling
- **Security**:
  - Encryption at rest (KMS)
  - Encryption in transit (TLS)
  - IAM roles with least privilege
  - Security groups with minimal ingress
  - VPC Flow Logs
  - IMDSv2 required
- **Monitoring**: CloudWatch Logs and Metrics
- **Infrastructure as Code**: Full Terraform automation
- **Cost Optimized**: Options for a single NAT gateway and Spot instances

## Prerequisites

Before deploying, ensure you have:

1. **AWS Account** with appropriate permissions
2. **AWS CLI** configured with credentials
   ```bash
   aws configure
   ```
3. **Terraform** >= 1.0 installed
   ```bash
   # Install via brew (macOS)
   brew install terraform

   # Or download from https://www.terraform.io/downloads
   ```
4. **kubectl** (for EKS deployment)
   ```bash
   brew install kubectl
   ```
5. **Helm** (for EKS deployment)
   ```bash
   brew install helm
   ```
6. **ACM Certificate** (optional, for HTTPS)
   - Request a certificate in AWS Certificate Manager
   - Validate domain ownership
   - Note the certificate ARN

7. **SAML/OIDC Provider** configured (e.g., Okta, Azure AD, Google Workspace)

## Quick Start

### EC2 Deployment

1. **Navigate to the EC2 deployment directory:**
   ```bash
   cd deployments/ec2
   ```

2. **Copy and configure variables:**
   ```bash
   cp terraform.tfvars.example terraform.tfvars
   vim terraform.tfvars  # Edit with your values
   ```

3. **Deploy using the automated script:**
   ```bash
   ../../scripts/deploy-ec2.sh
   ```

   Or manually:
   ```bash
   terraform init
   terraform plan
   terraform apply
   ```

4. **Get the code-server password:**
   ```bash
   aws secretsmanager get-secret-value \
     --secret-id $(terraform output -raw code_server_password_secret_arn) \
     --query SecretString \
     --output text
   ```

5. **Access code-server:**
   ```bash
   echo $(terraform output -raw alb_url)
   # Navigate to this URL in your browser
   ```

### EKS Deployment

1. **Navigate to the EKS deployment directory:**
   ```bash
   cd deployments/eks
   ```

2. **Copy and configure variables:**
   ```bash
   cp terraform.tfvars.example terraform.tfvars
   vim terraform.tfvars  # Edit with your values
   ```

3. **Deploy using the automated script:**
   ```bash
   ../../scripts/deploy-eks.sh
   ```

   This script will:
   - Deploy the EKS infrastructure
   - Configure kubectl
   - Install the AWS Load Balancer Controller
   - Deploy code-server (optional)
   - Deploy OAuth2 Proxy (optional)

4. **Manual deployment alternative:**
   ```bash
   # Deploy infrastructure
   terraform init
   terraform plan
   terraform apply

   # Configure kubectl
   aws eks update-kubeconfig --region <region> --name <cluster-name>

   # Deploy code-server
   helm upgrade --install code-server ../../ci/helm-chart \
     --namespace code-server \
     --create-namespace \
     --values k8s/code-server-values.yaml

   # Deploy OAuth2 Proxy
   kubectl apply -f k8s/oauth2-proxy.yaml
   ```

5. **Get the Load Balancer URL:**
   ```bash
   kubectl get ingress -n code-server
   # Wait for the ADDRESS field to be populated
   ```

## Configuration

### Key Configuration Files

#### EC2 Deployment

- `deployments/ec2/terraform.tfvars` - Main configuration
- `modules/ec2/user-data.sh` - EC2 initialization script

Important variables:

```hcl
# Network
vpc_cidr             = "10.0.0.0/16"
private_subnet_cidrs = ["10.0.11.0/24", "10.0.12.0/24", "10.0.13.0/24"]

# Security
allowed_cidr_blocks = ["10.0.0.0/8"] # Restrict access
internal_alb        = true           # Private load balancer

# OAuth2/SAML
oauth2_issuer_url    = "https://your-idp.com/.well-known/openid-configuration"
oauth2_client_id     = "your-client-id"
oauth2_client_secret = "your-client-secret"
oauth2_redirect_url  = "https://code-server.example.com/oauth2/callback"

# Generate with: python -c 'import os,base64; print(base64.urlsafe_b64encode(os.urandom(32)).decode())'
oauth2_cookie_secret = "<GENERATED_SECRET>"
```

#### EKS Deployment

- `deployments/eks/terraform.tfvars` - Main configuration
- `deployments/eks/k8s/code-server-values.yaml` - Helm values
- `deployments/eks/k8s/oauth2-proxy.yaml` - OAuth2 Proxy manifest

Important Helm values:

```yaml
# k8s/code-server-values.yaml
ingress:
  enabled: true
  ingressClassName: "alb"
  annotations:
    alb.ingress.kubernetes.io/scheme: internal
    alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:...

persistence:
  enabled: true
  storageClass: "gp3"
  size: 20Gi

resources:
  requests:
    cpu: 500m
    memory: 1Gi
  limits:
    cpu: 2000m
    memory: 4Gi
```

## SAML/OIDC Authentication

This deployment uses OAuth2 Proxy to provide SAML/OIDC authentication.

### Supported Providers

- Okta
- Azure Active Directory
- Google Workspace
- AWS SSO (IAM Identity Center)
- Any OIDC-compliant provider

### Configuration Steps

1. **Configure your Identity Provider:**

   **For Okta:**
   ```
   Application Type: Web
   Sign-in redirect URIs:  https://code-server.example.com/oauth2/callback
   Sign-out redirect URIs: https://code-server.example.com
   Grant types: Authorization Code, Refresh Token
   ```

   **For Azure AD:**
   ```
   Platform: Web
   Redirect URI: https://code-server.example.com/oauth2/callback
   Supported account types: Single tenant or Multi-tenant
   ```

2. **Get the OIDC Discovery URL:**

   Usually in the format:
   - Okta: `https://<tenant>.okta.com/.well-known/openid-configuration`
   - Azure AD: `https://login.microsoftonline.com/<tenant-id>/v2.0/.well-known/openid-configuration`
   - Google: `https://accounts.google.com/.well-known/openid-configuration`

3. **Configure the Terraform variables:**
   ```hcl
   oauth2_issuer_url    = "<OIDC_DISCOVERY_URL>"
   oauth2_client_id     = "<CLIENT_ID>"
   oauth2_client_secret = "<CLIENT_SECRET>"
   oauth2_redirect_url  = "https://code-server.example.com/oauth2/callback"
   ```

4. **Restrict access by email (optional):**
   ```hcl
   oauth2_allowed_emails = [
     "user1@company.com",
     "user2@company.com"
   ]
   ```

### Testing Authentication

1. Access your code-server URL
2. You should be redirected to your IdP login page
3. After successful authentication, you'll be redirected back to code-server
4. OAuth2 Proxy validates the session and proxies requests to code-server
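
Step 4 can be probed directly: OAuth2 Proxy exposes an `/oauth2/auth` endpoint (used for auth_request-style checks) that returns 202 for a valid session and 401 otherwise. A quick check against a placeholder hostname:

```bash
# Without a session cookie this should print 401.
curl -s -o /dev/null -w '%{http_code}\n' https://code-server.example.com/oauth2/auth

# With the browser's _oauth2_proxy cookie pasted in, expect 202.
curl -s -o /dev/null -w '%{http_code}\n' \
  -H 'Cookie: _oauth2_proxy=<value-from-browser>' \
  https://code-server.example.com/oauth2/auth
```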

## Security Features

### Network Security

- **Private Subnets**: All compute resources in private subnets
- **NAT Gateway**: Outbound internet access without public IPs
- **Security Groups**: Minimal ingress rules
- **VPC Flow Logs**: Network traffic monitoring
- **Internal ALB Option**: Keep the load balancer private

### Encryption

- **At Rest**: KMS encryption for EBS volumes and EKS secrets
- **In Transit**: TLS for all external connections
- **Secrets**: AWS Secrets Manager for the code-server password

### IAM

- **Least Privilege**: Minimal IAM permissions
- **IRSA** (EKS): IAM Roles for Service Accounts
- **Instance Profiles**: Role-based access for EC2

### Compliance

- **IMDSv2 Required**: Enhanced EC2 metadata security
- **Encrypted Storage**: All data encrypted at rest
- **Audit Logs**: CloudWatch and VPC Flow Logs

## Deployment Procedures

### Initial Deployment

1. **Prepare the configuration:**
   ```bash
   cd deployments/<ec2|eks>
   cp terraform.tfvars.example terraform.tfvars
   # Edit terraform.tfvars with your values
   ```

2. **Review the plan:**
   ```bash
   terraform init
   terraform plan
   # Review all resources to be created
   ```

3. **Apply the configuration:**
   ```bash
   terraform apply
   # Type 'yes' to confirm
   ```

4. **Verify the deployment:**
   ```bash
   # EC2
   aws autoscaling describe-auto-scaling-groups \
     --auto-scaling-group-names <asg-name>

   # EKS
   kubectl get pods -n code-server
   kubectl get ingress -n code-server
   ```

### DNS Configuration

1. **Get the Load Balancer DNS name:**
   ```bash
   # EC2
   terraform output alb_dns_name

   # EKS
   kubectl get ingress -n code-server -o jsonpath='{.items[0].status.loadBalancer.ingress[0].hostname}'
   ```

2. **Create a DNS record:**
   ```
   Type:  CNAME
   Name:  code-server.example.com
   Value: <alb-dns-name>
   TTL:   300
   ```
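
   If the zone is hosted in Route 53, the same record can be created from the CLI. A sketch; the hosted zone ID below is a placeholder, so look yours up first:

   ```bash
   # Find the hosted zone ID for your domain.
   aws route53 list-hosted-zones-by-name --dns-name example.com

   # UPSERT the CNAME (Z0123456789ABCDEFGHIJ is a placeholder zone ID).
   aws route53 change-resource-record-sets \
     --hosted-zone-id Z0123456789ABCDEFGHIJ \
     --change-batch '{
       "Changes": [{
         "Action": "UPSERT",
         "ResourceRecordSet": {
           "Name": "code-server.example.com",
           "Type": "CNAME",
           "TTL": 300,
           "ResourceRecords": [{"Value": "<alb-dns-name>"}]
         }
       }]
     }'
   ```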

3. **Update the configuration:**
   ```hcl
   # Update oauth2_redirect_url with the actual domain
   oauth2_redirect_url = "https://code-server.example.com/oauth2/callback"
   ```

4. **Reapply:**
   ```bash
   terraform apply
   ```

## Rollout and Updates

### Update the Code-Server Version

**EC2:**
```bash
# Update the version in terraform.tfvars:
#   code_server_version = "4.19.0"

# Apply the change (will trigger a rolling update)
terraform apply

# Monitor the Auto Scaling Group
aws autoscaling describe-auto-scaling-instances
```

**EKS:**
```bash
# Update the version in k8s/code-server-values.yaml:
#   image:
#     tag: "4.19.0"

# Perform a rolling update
helm upgrade code-server ../../ci/helm-chart \
  --namespace code-server \
  --values k8s/code-server-values.yaml \
  --wait

# Monitor the rollout
kubectl rollout status deployment/code-server -n code-server
```

### Blue-Green Deployment (EKS)

```bash
# Create a new deployment with a different name
helm install code-server-blue ../../ci/helm-chart \
  --namespace code-server \
  --values k8s/code-server-values-blue.yaml

# Test the new version
kubectl port-forward -n code-server svc/code-server-blue 8081:8080

# Switch traffic by updating the ingress
kubectl apply -f k8s/ingress-blue.yaml

# Delete the old deployment
helm uninstall code-server-green -n code-server
```

### Scaling

**EC2:**
```bash
# Update the desired capacity
terraform apply -var="desired_instances=3"

# Or use the AWS CLI
aws autoscaling set-desired-capacity \
  --auto-scaling-group-name <asg-name> \
  --desired-capacity 3
```

**EKS:**
```bash
# Scale the deployment
kubectl scale deployment code-server -n code-server --replicas=3

# Or update the Helm values
helm upgrade code-server ../../ci/helm-chart \
  --namespace code-server \
  --set replicaCount=3
```

### Rollback

**EC2:**
```bash
# Terraform has no built-in rollback:
# revert the change in git and reapply
git revert <commit>
terraform apply
```

**EKS:**
```bash
# Helm rollback
helm rollback code-server -n code-server

# Kubernetes rollback
kubectl rollout undo deployment/code-server -n code-server
```
## Monitoring and Logging

### CloudWatch Logs

**EC2:**
```bash
# View logs
aws logs tail /aws/ec2/<prefix>-code-server --follow

# Filter logs
aws logs filter-log-events \
  --log-group-name /aws/ec2/<prefix>-code-server \
  --filter-pattern "ERROR"
```

**EKS:**
```bash
# View pod logs
kubectl logs -n code-server -l app.kubernetes.io/name=code-server --tail=100 -f

# View previous pod logs
kubectl logs -n code-server <pod-name> --previous

# View OAuth2 Proxy logs
kubectl logs -n code-server -l app=oauth2-proxy --tail=100 -f
```

### Metrics

**EC2:**
```bash
# Auto Scaling Group metrics
aws cloudwatch get-metric-statistics \
  --namespace AWS/EC2 \
  --metric-name CPUUtilization \
  --dimensions Name=AutoScalingGroupName,Value=<asg-name> \
  --start-time 2024-01-01T00:00:00Z \
  --end-time 2024-01-01T23:59:59Z \
  --period 3600 \
  --statistics Average
```

**EKS:**
```bash
# Pod metrics (requires metrics-server)
kubectl top pods -n code-server

# Node metrics
kubectl top nodes

# View HPA status
kubectl get hpa -n code-server
```

### Health Checks

**EC2:**
```bash
# Check target group health
aws elbv2 describe-target-health \
  --target-group-arn <target-group-arn>
```

**EKS:**
```bash
# Check pod health
kubectl get pods -n code-server
kubectl describe pod <pod-name> -n code-server

# Check ingress status
kubectl describe ingress -n code-server
```

## Troubleshooting

### Common Issues

#### EC2: Instances Not Healthy

```bash
# Check the Auto Scaling Group
aws autoscaling describe-auto-scaling-groups \
  --auto-scaling-group-names <asg-name>

# Open a shell on the instance (via SSM)
aws ssm start-session --target <instance-id>

# View user-data logs
sudo cat /var/log/cloud-init-output.log

# Check the Docker containers
sudo docker ps
sudo docker logs code-server
sudo docker logs oauth2-proxy
```

#### EKS: Pods Not Starting

```bash
# Check pod status
kubectl describe pod <pod-name> -n code-server

# Check events
kubectl get events -n code-server --sort-by='.lastTimestamp'

# Check storage
kubectl get pvc -n code-server
kubectl describe pvc <pvc-name> -n code-server

# Check node resources
kubectl describe node <node-name>
```

#### Authentication Not Working

```bash
# EC2: Check the OAuth2 Proxy logs
aws logs tail /aws/ec2/<prefix>-code-server \
  --filter-pattern "oauth2-proxy" \
  --follow

# EKS: Check the OAuth2 Proxy logs
kubectl logs -n code-server -l app=oauth2-proxy

# Verify the configuration:
# - Redirect URL matches the IdP configuration
# - Client ID and secret are correct
# - Issuer URL is accessible
```

#### Load Balancer Not Accessible

```bash
# Check security groups
aws ec2 describe-security-groups --group-ids <sg-id>

# Check ALB status
aws elbv2 describe-load-balancers

# Check target health
aws elbv2 describe-target-health --target-group-arn <arn>

# EKS: Check the ingress
kubectl describe ingress -n code-server
```
## Cost Optimization

### Single NAT Gateway

Reduce costs by using a single NAT gateway (not recommended for production):

```hcl
single_nat_gateway = true
```

Savings: ~$32-96/month (depending on region)

### Spot Instances (EKS)

Use Spot instances for EKS nodes:

```hcl
capacity_type = "SPOT"
```

Savings: up to 90% on compute costs (with interruption risk)

### Auto Scaling

Configure aggressive scale-down policies:

```hcl
# EC2
min_instances = 0 # Scale to zero during off-hours

# EKS
min_nodes = 0 # Requires the cluster autoscaler
```

### Storage Optimization

Use gp3 instead of gp2 volumes:

```hcl
ebs_volume_type = "gp3" # EC2
```

```yaml
# EKS (Helm values)
storageClass: "gp3"
```

Savings: ~20% on storage costs

## Cleanup

### EC2

```bash
# Using the script
../scripts/destroy-ec2.sh

# Or manually
cd deployments/ec2
terraform destroy
```

### EKS

```bash
# Using the script
../scripts/destroy-eks.sh

# Or manually
helm uninstall code-server -n code-server
kubectl delete namespace code-server
cd deployments/eks
terraform destroy
```

**Important**: `terraform destroy` removes all resources, including:

- EC2 instances / the EKS cluster
- Load balancers
- VPC and networking
- KMS keys (after the 7-30 day waiting period)
- CloudWatch logs (subject to retention settings)

## Support and Contributing

For issues or questions:

1. Check the [troubleshooting](#troubleshooting) section
2. Review the [code-server documentation](https://coder.com/docs/code-server)
3. Check the AWS service health dashboard
4. Review the CloudWatch logs

## License

This Terraform configuration is provided as-is under the MIT License.

## Additional Resources

- [Code-Server Documentation](https://coder.com/docs/code-server)
- [OAuth2 Proxy Documentation](https://oauth2-proxy.github.io/oauth2-proxy/)
- [AWS EKS Best Practices](https://aws.github.io/aws-eks-best-practices/)
- [Terraform AWS Provider](https://registry.terraform.io/providers/hashicorp/aws/latest/docs)
494  terraform/SAML-SETUP-GUIDE.md  Normal file

@@ -0,0 +1,494 @@
# SAML/OIDC Authentication Setup Guide

This guide provides step-by-step instructions for configuring various identity providers with the code-server deployment.

## Table of Contents

- [Overview](#overview)
- [Okta Setup](#okta-setup)
- [Azure Active Directory Setup](#azure-active-directory-setup)
- [Google Workspace Setup](#google-workspace-setup)
- [AWS IAM Identity Center (SSO) Setup](#aws-iam-identity-center-sso-setup)
- [Generic OIDC Provider](#generic-oidc-provider)
- [Testing Authentication](#testing-authentication)
- [Troubleshooting](#troubleshooting)

## Overview

The code-server deployment uses OAuth2 Proxy to provide authentication via SAML/OIDC. It acts as a reverse proxy that handles authentication before requests reach code-server.

### Key Concepts

- **OIDC Discovery URL**: Endpoint that provides the IdP configuration
- **Client ID**: Unique identifier for your application
- **Client Secret**: Secret key for authentication
- **Redirect URI**: URL where users return after authentication
- **Cookie Secret**: Secret for encrypting session cookies
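
To make these concepts concrete, here is a minimal `oauth2_proxy.cfg` sketch showing where each value lands. All values are placeholders, and the files generated by this deployment may differ:

```
# OIDC provider settings; issuer URL, client ID, and client secret come from your IdP
provider        = "oidc"
oidc_issuer_url = "https://your-idp.com"
client_id       = "<CLIENT_ID>"
client_secret   = "<CLIENT_SECRET>"

# Redirect URI registered with the IdP
redirect_url = "https://code-server.example.com/oauth2/callback"

# Cookie secret encrypts the session cookie (see the generation notes below)
cookie_secret = "<GENERATED_SECRET>"
cookie_secure = true

# Upstream that authenticated requests are proxied to (code-server)
upstreams = ["http://127.0.0.1:8080"]
```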

## Okta Setup

### 1. Create an Application in Okta

1. Log in to your Okta admin console
2. Navigate to **Applications** → **Applications**
3. Click **Create App Integration**
4. Select:
   - **Sign-in method**: OIDC - OpenID Connect
   - **Application type**: Web Application
5. Click **Next**

### 2. Configure the Application

**General Settings:**

- **App integration name**: Code-Server
- **Logo**: (optional) Upload the code-server logo

**Sign-in redirect URIs:**
```
https://code-server.example.com/oauth2/callback
```

**Sign-out redirect URIs:**
```
https://code-server.example.com
```

**Assignments:**

- **Controlled access**: Choose who can access (Everyone, specific groups, etc.)

Click **Save**.

### 3. Get the Configuration Values

After creating the application:

1. Copy the **Client ID**
2. Copy the **Client Secret** (click "Show" if hidden)
3. Note your Okta domain (e.g., `dev-12345.okta.com`)

### 4. Configure the Terraform Variables

```hcl
# terraform.tfvars

oauth2_issuer_url    = "https://dev-12345.okta.com/.well-known/openid-configuration"
oauth2_client_id     = "<YOUR_CLIENT_ID>"
oauth2_client_secret = "<YOUR_CLIENT_SECRET>"
oauth2_redirect_url  = "https://code-server.example.com/oauth2/callback"

# Generate the cookie secret with:
# python -c 'import os,base64; print(base64.urlsafe_b64encode(os.urandom(32)).decode())'
oauth2_cookie_secret = "<GENERATED_SECRET>"

# Optional: restrict to specific users
oauth2_allowed_emails = [
  "user1@company.com",
  "user2@company.com"
]
```

### 5. Assign Users

In the Okta admin console:

1. Go to **Applications** → **Code-Server**
2. Click the **Assignments** tab
3. Click **Assign** → **Assign to People** or **Assign to Groups**
4. Add the users/groups who should have access
## Azure Active Directory Setup

### 1. Register an Application

1. Log in to the [Azure Portal](https://portal.azure.com)
2. Navigate to **Azure Active Directory** → **App registrations**
3. Click **New registration**

### 2. Configure the Application

**Name:** Code-Server

**Supported account types:**

- Single tenant (most common)
- Or multi-tenant if needed

**Redirect URI:**

- Platform: **Web**
- URL: `https://code-server.example.com/oauth2/callback`

Click **Register**.

### 3. Configure Authentication

1. Go to **Authentication** in the left menu
2. Under **Implicit grant and hybrid flows**, check:
   - ✅ ID tokens (used for implicit and hybrid flows)
3. Under **Advanced settings**:
   - Allow public client flows: **No**
4. Click **Save**

### 4. Create a Client Secret

1. Go to **Certificates & secrets** in the left menu
2. Click **New client secret**
3. Description: Code-Server
4. Expires: choose a duration (24 months recommended)
5. Click **Add**
6. **Copy the secret value immediately** (it won't be shown again)

### 5. API Permissions

1. Go to **API permissions** in the left menu
2. Verify these permissions exist:
   - Microsoft Graph → `openid`
   - Microsoft Graph → `profile`
   - Microsoft Graph → `email`
3. Click **Grant admin consent** (if you have admin rights)

### 6. Get the Configuration Values

From the **Overview** page:

- **Application (client) ID**: copy this
- **Directory (tenant) ID**: copy this

### 7. Configure the Terraform Variables

```hcl
# terraform.tfvars

oauth2_issuer_url    = "https://login.microsoftonline.com/<TENANT_ID>/v2.0/.well-known/openid-configuration"
oauth2_client_id     = "<APPLICATION_CLIENT_ID>"
oauth2_client_secret = "<CLIENT_SECRET>"
oauth2_redirect_url  = "https://code-server.example.com/oauth2/callback"
oauth2_cookie_secret = "<GENERATED_SECRET>"

# Optional: restrict by email
oauth2_allowed_emails = [
  "user1@company.com",
  "user2@company.com"
]
```

### 8. Restrict Access (Optional)

To limit access to specific users/groups:

1. Go to **Enterprise applications**
2. Find your **Code-Server** application
3. Go to **Properties**
4. Set **User assignment required?** to **Yes**
5. Go to **Users and groups**
6. Click **Add user/group**
7. Select the users or groups
## Google Workspace Setup

### 1. Create an OAuth Client

1. Go to the [Google Cloud Console](https://console.cloud.google.com)
2. Select or create a project
3. Navigate to **APIs & Services** → **Credentials**
4. Click **Create Credentials** → **OAuth client ID**

### 2. Configure the OAuth Consent Screen

If prompted:

1. Click **Configure Consent Screen**
2. User type: **Internal** (for Google Workspace) or **External**
3. Fill in the application information:
   - App name: Code-Server
   - User support email: your email
   - Developer contact: your email
4. Scopes: add `openid`, `email`, `profile`
5. Click **Save and Continue**

### 3. Create the OAuth Client ID

**Application type:** Web application

**Name:** Code-Server

**Authorized redirect URIs:**
```
https://code-server.example.com/oauth2/callback
```

Click **Create**.

### 4. Get the Configuration Values

After creation:

- Copy the **Client ID**
- Copy the **Client Secret**

### 5. Configure the Terraform Variables

```hcl
# terraform.tfvars

oauth2_issuer_url    = "https://accounts.google.com/.well-known/openid-configuration"
oauth2_client_id     = "<YOUR_CLIENT_ID>.apps.googleusercontent.com"
oauth2_client_secret = "<YOUR_CLIENT_SECRET>"
oauth2_redirect_url  = "https://code-server.example.com/oauth2/callback"
oauth2_cookie_secret = "<GENERATED_SECRET>"

# Restrict to your domain
oauth2_allowed_emails = [
  "user1@company.com",
  "user2@company.com"
]
```

### 6. Domain Restriction (Google Workspace)

To restrict access to your entire domain:

```
# For EKS: k8s/oauth2-proxy.yaml
# Add to the ConfigMap:
email_domains = ["company.com"]
```
## AWS IAM Identity Center (SSO) Setup

### 1. Enable IAM Identity Center

1. Go to [IAM Identity Center](https://console.aws.amazon.com/singlesignon)
2. Enable IAM Identity Center if it is not already enabled
3. Note your **AWS access portal URL**

### 2. Register the Application

1. In IAM Identity Center, go to **Applications**
2. Click **Add application**
3. Select **I have an application I want to set up**
4. Click **Next**

### 3. Configure the Application

**Display name:** Code-Server

**Description:** Code-Server IDE

**Application start URL:** `https://code-server.example.com`

**Application metadata:**

- Choose **Manual entry**
- **Application ACS URL**: `https://code-server.example.com/oauth2/callback`
- **Application SAML audience**: `https://code-server.example.com`

Click **Submit**.

### 4. Get the Configuration Values

1. Download the **IAM Identity Center SAML metadata file**
2. Note the **Client ID** (from the application details)
3. Create a **Client Secret** (in the application settings)

### 5. Configure the Terraform Variables

```hcl
# terraform.tfvars

# Use the OIDC endpoint for your region
oauth2_issuer_url    = "https://portal.sso.<region>.amazonaws.com/.well-known/openid-configuration"
oauth2_client_id     = "<YOUR_CLIENT_ID>"
oauth2_client_secret = "<YOUR_CLIENT_SECRET>"
oauth2_redirect_url  = "https://code-server.example.com/oauth2/callback"
oauth2_cookie_secret = "<GENERATED_SECRET>"
```

### 6. Assign Users

1. Go to the **Assigned users** tab
2. Click **Assign users**
3. Select the users or groups
4. Click **Assign users**
## Generic OIDC Provider

For any OIDC-compliant provider:

### 1. Required Information

Obtain from your IdP:

- The OIDC Discovery URL (usually `https://idp.example.com/.well-known/openid-configuration`)
- Client ID
- Client Secret
- Supported scopes (typically `openid`, `profile`, `email`)

### 2. Register the Redirect URI

In your IdP, register:
```
https://code-server.example.com/oauth2/callback
```

### 3. Configure the Terraform Variables

```hcl
# terraform.tfvars

oauth2_issuer_url    = "<OIDC_DISCOVERY_URL>"
oauth2_client_id     = "<CLIENT_ID>"
oauth2_client_secret = "<CLIENT_SECRET>"
oauth2_redirect_url  = "https://code-server.example.com/oauth2/callback"
oauth2_cookie_secret = "<GENERATED_SECRET>"
```

## Testing Authentication

### 1. Deploy the Application

```bash
# EC2
cd deployments/ec2
terraform apply

# EKS
cd deployments/eks
terraform apply
kubectl apply -f k8s/oauth2-proxy.yaml
```

### 2. Access the Application

Navigate to your code-server URL (e.g., `https://code-server.example.com`).

### 3. Expected Flow

1. The browser redirects to the IdP login page
2. Enter your credentials and authenticate
3. The IdP redirects back to code-server with an authorization code
4. OAuth2 Proxy exchanges the code for tokens
5. A session cookie is set
6. The request is proxied to code-server
7. The code-server interface loads

### 4. Verify the Session

After a successful login:

- Check the browser cookies for the `_oauth2_proxy` cookie
- The cookie should be HttpOnly, Secure, and SameSite

### 5. Test Logout

Navigate to: `https://code-server.example.com/oauth2/sign_out`

You should be logged out and redirected to the IdP.
## Troubleshooting

### Common Issues

#### Redirect URI Mismatch

**Error:** `redirect_uri_mismatch` or similar

**Solution:**

1. Verify the redirect URI in the IdP exactly matches the Terraform configuration
2. Check for trailing slashes (there should be none)
3. Ensure HTTPS (not HTTP)

#### Invalid Client

**Error:** `invalid_client`

**Solution:**

1. Verify the Client ID is correct
2. Verify the Client Secret is correct and not expired
3. Check that the client is enabled in the IdP

#### Cookie Errors

**Error:** Authentication succeeds but the session is not maintained

**Solution:**

1. Ensure `oauth2_cookie_secret` is set and is 32 bytes (base64-encoded)
2. Verify the cookie domain matches your URL
3. Check that the browser is accepting cookies
4. Ensure HTTPS is configured (cookies may not work over HTTP)

#### Access Denied

**Error:** The user authenticates but gets "Access Denied"

**Solution:**

1. Check the `oauth2_allowed_emails` list
2. Verify the user is assigned to the application in the IdP
3. Check the OAuth2 Proxy logs:
   ```bash
   # EC2
   aws logs tail /aws/ec2/<prefix>-code-server --filter-pattern oauth2-proxy

   # EKS
   kubectl logs -n code-server -l app=oauth2-proxy
   ```

#### Issuer URL Not Accessible

**Error:** `error fetching OIDC discovery`

**Solution:**

1. Verify the issuer URL is accessible from your network
2. Check that the security groups allow outbound HTTPS
3. Verify the URL is correct (test it in a browser)
4. Check for typos in the URL

### Debug Mode

Enable debug logging:

**EC2:**

Edit `modules/ec2/user-data.sh` and add to the oauth2-proxy args:
```yaml
- --log-level=debug
```

**EKS:**

Edit `deployments/eks/k8s/oauth2-proxy.yaml`:
```yaml
args:
  - --log-level=debug
```

### Testing Connectivity

**Test OIDC discovery:**
```bash
curl -s https://your-idp.com/.well-known/openid-configuration | jq .
```

**Test the redirect:**
```bash
# Should show the OAuth2 login page
curl -I https://code-server.example.com
```

**Check OAuth2 Proxy health:**
```bash
# EC2
curl http://<instance-ip>:4180/ping

# EKS
kubectl port-forward -n code-server svc/oauth2-proxy 4180:4180
curl http://localhost:4180/ping
```

## Security Best Practices

1. **Use HTTPS**: Always use HTTPS in production
2. **Rotate Secrets**: Regularly rotate client secrets and cookie secrets
3. **Limit Scope**: Request only the necessary OIDC scopes
4. **Session Timeout**: Configure an appropriate session expiry
5. **Restrict Emails**: Use `oauth2_allowed_emails` to limit access
6. **Monitor Logs**: Regularly review authentication logs
7. **Use Groups**: Manage access via IdP groups rather than individual users

## Additional Resources

- [OAuth2 Proxy Documentation](https://oauth2-proxy.github.io/oauth2-proxy/)
- [OIDC Specification](https://openid.net/connect/)
- [Okta OIDC Guide](https://developer.okta.com/docs/concepts/oauth-openid/)
- [Azure AD OIDC Guide](https://docs.microsoft.com/en-us/azure/active-directory/develop/v2-protocols-oidc)
- [Google OIDC Guide](https://developers.google.com/identity/protocols/oauth2/openid-connect)
117  terraform/deployments/ec2/main.tf  Normal file

@@ -0,0 +1,117 @@
# EC2 Deployment Configuration for Code-Server
# This file creates all necessary infrastructure to deploy code-server on EC2

terraform {
  required_version = ">= 1.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.0"
    }
    random = {
      source  = "hashicorp/random"
      version = "~> 3.5"
    }
  }

  # Uncomment and configure for remote state storage
  # backend "s3" {
  #   bucket         = "your-terraform-state-bucket"
  #   key            = "code-server/ec2/terraform.tfstate"
  #   region         = "us-east-1"
  #   encrypt        = true
  #   dynamodb_table = "terraform-state-lock"
  # }
}

provider "aws" {
  region = var.aws_region

  default_tags {
    tags = {
      Project     = "code-server"
      Environment = var.environment
      ManagedBy   = "Terraform"
      Deployment  = "EC2"
    }
  }
}

locals {
  name_prefix  = "${var.project_name}-${var.environment}"
  cluster_name = "${local.name_prefix}-eks" # Used for VPC subnet tagging

  common_tags = {
    Project     = var.project_name
    Environment = var.environment
    ManagedBy   = "Terraform"
    Deployment  = "EC2"
  }
}

# VPC Module
module "vpc" {
  source = "../../modules/vpc"

  name_prefix              = local.name_prefix
  vpc_cidr                 = var.vpc_cidr
  public_subnet_cidrs      = var.public_subnet_cidrs
  private_subnet_cidrs     = var.private_subnet_cidrs
  aws_region               = var.aws_region
  cluster_name             = local.cluster_name
  enable_nat_gateway       = true
  single_nat_gateway       = var.single_nat_gateway
  enable_vpc_endpoints     = true
  enable_flow_logs         = true
  flow_logs_retention_days = 30

  tags = local.common_tags
}

# Security Module
module "security" {
  source = "../../modules/security"

  name_prefix             = local.name_prefix
  vpc_id                  = module.vpc.vpc_id
  allowed_cidr_blocks     = var.allowed_cidr_blocks
  ssh_allowed_cidr_blocks = var.ssh_allowed_cidr_blocks

  tags = local.common_tags
}

# EC2 Module for Code-Server
module "code_server_ec2" {
  source = "../../modules/ec2"

  name_prefix               = local.name_prefix
  vpc_id                    = module.vpc.vpc_id
  subnet_ids                = module.vpc.private_subnet_ids
  alb_subnet_ids            = var.internal_alb ? module.vpc.private_subnet_ids : module.vpc.public_subnet_ids
  security_group_id         = module.security.code_server_ec2_security_group_id
  alb_security_group_id     = module.security.alb_security_group_id
  iam_instance_profile_name = module.security.code_server_ec2_instance_profile_name
  kms_key_arn               = module.security.kms_key_arn
  aws_region                = var.aws_region

  instance_type       = var.instance_type
  ebs_volume_size     = var.ebs_volume_size
  min_instances       = var.min_instances
  max_instances       = var.max_instances
  desired_instances   = var.desired_instances
  code_server_version = var.code_server_version
  certificate_arn     = var.certificate_arn
  internal_alb        = var.internal_alb
  enable_autoscaling  = var.enable_autoscaling

  # OAuth2 Proxy Configuration
  oauth2_client_id      = var.oauth2_client_id
  oauth2_client_secret  = var.oauth2_client_secret
  oauth2_issuer_url     = var.oauth2_issuer_url
  oauth2_redirect_url   = var.oauth2_redirect_url
  oauth2_cookie_secret  = var.oauth2_cookie_secret
  oauth2_allowed_emails = var.oauth2_allowed_emails

  tags = local.common_tags
}
77  terraform/deployments/ec2/outputs.tf  Normal file

@@ -0,0 +1,77 @@
# EC2 Deployment Outputs

output "vpc_id" {
  description = "ID of the VPC"
  value       = module.vpc.vpc_id
}

output "private_subnet_ids" {
  description = "IDs of private subnets"
  value       = module.vpc.private_subnet_ids
}

output "public_subnet_ids" {
  description = "IDs of public subnets"
  value       = module.vpc.public_subnet_ids
}

output "alb_dns_name" {
  description = "DNS name of the Application Load Balancer"
  value       = module.code_server_ec2.alb_dns_name
}

output "alb_url" {
  description = "URL to access Code-Server"
  value       = var.certificate_arn != "" ? "https://${module.code_server_ec2.alb_dns_name}" : "http://${module.code_server_ec2.alb_dns_name}"
}

output "code_server_password_secret_arn" {
  description = "ARN of the Secrets Manager secret containing the code-server password"
  value       = module.code_server_ec2.code_server_password_secret_arn
}

output "autoscaling_group_name" {
  description = "Name of the Auto Scaling Group"
  value       = module.code_server_ec2.autoscaling_group_name
}

output "kms_key_arn" {
  description = "ARN of the KMS key for encryption"
  value       = module.security.kms_key_arn
}

output "next_steps" {
  description = "Next steps to complete the setup"
  value       = <<-EOT

    Code-Server EC2 Deployment Complete!

    Next Steps:
    1. Access Code-Server at: ${var.certificate_arn != "" ? "https" : "http"}://${module.code_server_ec2.alb_dns_name}

    2. Get the code-server password:
       aws secretsmanager get-secret-value \
         --secret-id ${module.code_server_ec2.code_server_password_secret_arn} \
         --region ${var.aws_region} \
         --query SecretString \
         --output text

    3. Configure DNS (if using a custom domain):
       - Create a CNAME record pointing to: ${module.code_server_ec2.alb_dns_name}
       - Update oauth2_redirect_url with your domain

    4. Monitor the deployment:
       - CloudWatch Logs: /aws/ec2/${local.name_prefix}-code-server
       - Auto Scaling Group: ${module.code_server_ec2.autoscaling_group_name}

    5. For SAML/OIDC authentication:
       - Ensure your IdP is configured with the redirect URL: ${var.oauth2_redirect_url}
       - Verify the allowed email addresses are configured

    Security Notes:
    - All instances are in private subnets
    - The ALB is ${var.internal_alb ? "internal (private network only)" : "public"}
    - Data is encrypted at rest using KMS
    - VPC Flow Logs are enabled for monitoring
  EOT
}
47  terraform/deployments/ec2/terraform.tfvars.example  Normal file

@@ -0,0 +1,47 @@
# Example Terraform Variables for EC2 Deployment
# Copy this file to terraform.tfvars and fill in your values

aws_region   = "us-east-1"
project_name = "code-server"
environment  = "dev"

# VPC Configuration
vpc_cidr             = "10.0.0.0/16"
public_subnet_cidrs  = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
private_subnet_cidrs = ["10.0.11.0/24", "10.0.12.0/24", "10.0.13.0/24"]
single_nat_gateway   = false # Set to true for cost optimization (single NAT gateway)

# Security Configuration
allowed_cidr_blocks     = ["10.0.0.0/8"] # Restrict to the private network
ssh_allowed_cidr_blocks = []             # No SSH access (use SSM instead)

# EC2 Configuration
instance_type       = "t3.medium"
ebs_volume_size     = 50
min_instances       = 1
max_instances       = 3
desired_instances   = 1
code_server_version = "latest"
enable_autoscaling  = true

# Load Balancer Configuration
# Get the certificate ARN from ACM, or leave empty for HTTP
certificate_arn = ""   # Example: "arn:aws:acm:us-east-1:123456789012:certificate/xxxxx"
internal_alb    = true # Set to false for an internet-facing ALB

# OAuth2 / SAML Configuration
# Configure these values based on your IdP (Okta, Azure AD, etc.)
oauth2_client_id     = "your-client-id-from-idp"
oauth2_client_secret = "your-client-secret-from-idp"
oauth2_issuer_url    = "https://your-idp.com/.well-known/openid-configuration"
oauth2_redirect_url  = "https://code-server.example.com/oauth2/callback"

# Generate the cookie secret with:
# python -c 'import os,base64; print(base64.urlsafe_b64encode(os.urandom(32)).decode())'
oauth2_cookie_secret = "generate-random-secret-here"

# Allowed email addresses (leave empty to allow all authenticated users)
oauth2_allowed_emails = [
  # "user1@example.com",
  # "user2@example.com"
]
147  terraform/deployments/ec2/variables.tf  Normal file

@@ -0,0 +1,147 @@
# EC2 Deployment Variables

variable "aws_region" {
  description = "AWS region for deployment"
  type        = string
  default     = "us-east-1"
}

variable "project_name" {
  description = "Project name"
  type        = string
  default     = "code-server"
}

variable "environment" {
  description = "Environment name (dev, staging, prod)"
  type        = string
  default     = "dev"
}

# VPC Configuration
variable "vpc_cidr" {
  description = "CIDR block for VPC"
  type        = string
  default     = "10.0.0.0/16"
}

variable "public_subnet_cidrs" {
  description = "CIDR blocks for public subnets"
  type        = list(string)
  default     = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
}

variable "private_subnet_cidrs" {
  description = "CIDR blocks for private subnets"
  type        = list(string)
  default     = ["10.0.11.0/24", "10.0.12.0/24", "10.0.13.0/24"]
}

variable "single_nat_gateway" {
  description = "Use a single NAT gateway (cost optimization)"
  type        = bool
  default     = false
}

# Security Configuration
variable "allowed_cidr_blocks" {
  description = "CIDR blocks allowed to access the ALB"
  type        = list(string)
  default     = ["10.0.0.0/8"] # Restrict to private network
}

variable "ssh_allowed_cidr_blocks" {
  description = "CIDR blocks allowed to SSH into instances"
  type        = list(string)
  default     = [] # No SSH access by default
}

# EC2 Configuration
variable "instance_type" {
  description = "EC2 instance type"
  type        = string
  default     = "t3.medium"
}

variable "ebs_volume_size" {
  description = "Size of EBS volume in GB"
  type        = number
  default     = 50
}

variable "min_instances" {
  description = "Minimum number of instances"
  type        = number
  default     = 1
}

variable "max_instances" {
  description = "Maximum number of instances"
  type        = number
  default     = 3
}

variable "desired_instances" {
  description = "Desired number of instances"
  type        = number
  default     = 1
}

variable "code_server_version" {
  description = "Version of code-server Docker image"
  type        = string
  default     = "latest"
}

variable "enable_autoscaling" {
  description = "Enable auto scaling"
  type        = bool
  default     = true
}

# Load Balancer Configuration
variable "certificate_arn" {
  description = "ARN of ACM certificate for HTTPS (leave empty to use HTTP)"
  type        = string
  default     = ""
}

variable "internal_alb" {
  description = "Whether the ALB should be internal (private network only)"
  type        = bool
  default     = true
}

# OAuth2 / SAML Configuration
variable "oauth2_client_id" {
  description = "OAuth2 client ID from your SAML/OIDC provider"
  type        = string
}

variable "oauth2_client_secret" {
  description = "OAuth2 client secret from your SAML/OIDC provider"
  type        = string
  sensitive   = true
}

variable "oauth2_issuer_url" {
  description = "OAuth2 issuer URL (OIDC discovery endpoint)"
  type        = string
}

variable "oauth2_redirect_url" {
  description = "OAuth2 redirect URL (https://your-domain.com/oauth2/callback)"
  type        = string
}

variable "oauth2_cookie_secret" {
  description = "OAuth2 cookie secret (generate with: python -c 'import os,base64; print(base64.urlsafe_b64encode(os.urandom(32)).decode())')"
  type        = string
  sensitive   = true
}

variable "oauth2_allowed_emails" {
  description = "List of allowed email addresses (leave empty to allow all)"
  type        = list(string)
  default     = []
}
144
terraform/deployments/eks/k8s/code-server-values.yaml
Normal file
@@ -0,0 +1,144 @@
# Helm values for Code-Server deployment on EKS
# This file should be customized before deployment

replicaCount: 1

image:
  repository: codercom/code-server
  tag: "4.18.0"
  pullPolicy: IfNotPresent

imagePullSecrets: []

nameOverride: ""
fullnameOverride: "code-server"

serviceAccount:
  create: true
  annotations: {}
  name: "code-server"

podAnnotations: {}

podSecurityContext:
  fsGroup: 1000
  runAsUser: 1000
  runAsNonRoot: true

securityContext:
  enabled: true
  fsGroup: 1000
  runAsUser: 1000
  runAsNonRoot: true
  capabilities:
    drop:
      - ALL
  readOnlyRootFilesystem: false
  allowPrivilegeEscalation: false

service:
  type: ClusterIP
  port: 8080

# Ingress configuration with AWS Load Balancer Controller
ingress:
  enabled: true
  ingressClassName: "alb"
  annotations:
    alb.ingress.kubernetes.io/scheme: internal
    alb.ingress.kubernetes.io/target-type: ip
    alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS": 443}]'
    alb.ingress.kubernetes.io/ssl-redirect: '443'
    alb.ingress.kubernetes.io/healthcheck-path: /healthz
    alb.ingress.kubernetes.io/healthcheck-protocol: HTTP
    alb.ingress.kubernetes.io/healthcheck-interval-seconds: '30'
    alb.ingress.kubernetes.io/healthcheck-timeout-seconds: '5'
    alb.ingress.kubernetes.io/healthy-threshold-count: '2'
    alb.ingress.kubernetes.io/unhealthy-threshold-count: '2'
    # Uncomment and set your certificate ARN
    # alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:region:account:certificate/xxxxx
    # Uncomment to set custom subnets
    # alb.ingress.kubernetes.io/subnets: subnet-xxxxx,subnet-yyyyy
    # Uncomment to set custom security groups
    # alb.ingress.kubernetes.io/security-groups: sg-xxxxx
  hosts:
    - host: code-server.example.com
      paths:
        - path: /
          pathType: Prefix
  tls: []

resources:
  limits:
    cpu: 2000m
    memory: 4Gi
  requests:
    cpu: 500m
    memory: 1Gi

# Persistent storage configuration
persistence:
  enabled: true
  storageClass: "gp3"
  accessMode: ReadWriteOnce
  size: 20Gi
  annotations: {}

# Volume permissions init container
volumePermissions:
  enabled: true
  securityContext:
    runAsUser: 0

nodeSelector: {}

tolerations: []

affinity: {}

# Extra environment variables
extraVars:
  - name: DISABLE_TELEMETRY
    value: "true"

# Extra arguments for code-server
extraArgs: []

# Lifecycle hooks
lifecycle:
  enabled: false

# Extra init containers (e.g., for installing extensions)
extraInitContainers: |
  # - name: install-extensions
  #   image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
  #   imagePullPolicy: IfNotPresent
  #   env:
  #     - name: SERVICE_URL
  #       value: https://open-vsx.org/vscode/gallery
  #     - name: ITEM_URL
  #       value: https://open-vsx.org/vscode/item
  #   command:
  #     - sh
  #     - -c
  #     - |
  #       code-server --install-extension ms-python.python
  #       code-server --install-extension golang.Go
  #   volumeMounts:
  #     - name: data
  #       mountPath: /home/coder

# Extra containers (e.g., OAuth2 Proxy sidecar)
extraContainers: ""

# Extra secret mounts
extraSecretMounts: []

# Extra volume mounts
extraVolumeMounts: []

# Extra configmap mounts
extraConfigmapMounts: []

# Extra ports
extraPorts: []
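To sanity-check a customized values file before installing, `helm template` renders the manifests locally without touching the cluster. A sketch, with the chart path and release name taken from the `next_steps` output later in this commit:

```bash
# Render the chart with this values file and inspect what would be applied,
# then install for real (run from deployments/eks/k8s, per next_steps).
helm template code-server ../../ci/helm-chart --values code-server-values.yaml | less
helm upgrade --install code-server ../../ci/helm-chart \
  --namespace code-server --create-namespace \
  --values code-server-values.yaml
```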
224
terraform/deployments/eks/k8s/oauth2-proxy.yaml
Normal file
@@ -0,0 +1,224 @@
# OAuth2 Proxy deployment for SAML authentication
# This provides the authentication layer for Code-Server on EKS
---
apiVersion: v1
kind: Namespace
metadata:
  name: code-server
---
apiVersion: v1
kind: Secret
metadata:
  name: oauth2-proxy-secrets
  namespace: code-server
type: Opaque
stringData:
  client-id: "YOUR_SAML_CLIENT_ID"
  client-secret: "YOUR_SAML_CLIENT_SECRET"
  # Generate with: python -c 'import os,base64; print(base64.urlsafe_b64encode(os.urandom(32)).decode())'
  cookie-secret: "REPLACE_WITH_GENERATED_COOKIE_SECRET"
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: oauth2-proxy-config
  namespace: code-server
data:
  oauth2_proxy.cfg: |
    provider = "oidc"
    provider_display_name = "SSO"
    redirect_url = "https://code-server.example.com/oauth2/callback"
    oidc_issuer_url = "https://your-saml-idp.com"
    upstreams = "http://code-server:8080"
    email_domains = ["*"]
    cookie_secure = true
    cookie_httponly = true
    cookie_samesite = "lax"
    cookie_refresh = "1h"
    cookie_expire = "24h"
    set_xauthrequest = true
    pass_access_token = true
    pass_authorization_header = true
    set_authorization_header = true
    skip_provider_button = false
    whitelist_domains = [".example.com"]
---
apiVersion: v1
kind: Service
metadata:
  name: oauth2-proxy
  namespace: code-server
  labels:
    app: oauth2-proxy
spec:
  type: ClusterIP
  ports:
    - name: http
      port: 4180
      targetPort: 4180
      protocol: TCP
  selector:
    app: oauth2-proxy
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: oauth2-proxy
  namespace: code-server
  labels:
    app: oauth2-proxy
spec:
  replicas: 2
  selector:
    matchLabels:
      app: oauth2-proxy
  template:
    metadata:
      labels:
        app: oauth2-proxy
    spec:
      serviceAccountName: oauth2-proxy
      securityContext:
        runAsNonRoot: true
        runAsUser: 2000
        fsGroup: 2000
        seccompProfile:
          type: RuntimeDefault
      containers:
        - name: oauth2-proxy
          image: quay.io/oauth2-proxy/oauth2-proxy:v7.5.1
          imagePullPolicy: IfNotPresent
          args:
            - --config=/etc/oauth2-proxy/oauth2_proxy.cfg
            - --http-address=0.0.0.0:4180
          env:
            - name: OAUTH2_PROXY_CLIENT_ID
              valueFrom:
                secretKeyRef:
                  name: oauth2-proxy-secrets
                  key: client-id
            - name: OAUTH2_PROXY_CLIENT_SECRET
              valueFrom:
                secretKeyRef:
                  name: oauth2-proxy-secrets
                  key: client-secret
            - name: OAUTH2_PROXY_COOKIE_SECRET
              valueFrom:
                secretKeyRef:
                  name: oauth2-proxy-secrets
                  key: cookie-secret
          ports:
            - name: http
              containerPort: 4180
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /ping
              port: http
              scheme: HTTP
            initialDelaySeconds: 10
            timeoutSeconds: 1
            periodSeconds: 10
            successThreshold: 1
            failureThreshold: 3
          readinessProbe:
            httpGet:
              path: /ping
              port: http
              scheme: HTTP
            initialDelaySeconds: 5
            timeoutSeconds: 1
            periodSeconds: 10
            successThreshold: 1
            failureThreshold: 3
          resources:
            limits:
              cpu: 500m
              memory: 512Mi
            requests:
              cpu: 100m
              memory: 128Mi
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - ALL
            readOnlyRootFilesystem: true
          volumeMounts:
            - name: config
              mountPath: /etc/oauth2-proxy
              readOnly: true
      volumes:
        - name: config
          configMap:
            name: oauth2-proxy-config
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: oauth2-proxy
  namespace: code-server
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: oauth2-proxy
  namespace: code-server
  annotations:
    alb.ingress.kubernetes.io/scheme: internal
    alb.ingress.kubernetes.io/target-type: ip
    alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS": 443}]'
    alb.ingress.kubernetes.io/ssl-redirect: '443'
    alb.ingress.kubernetes.io/healthcheck-path: /ping
    alb.ingress.kubernetes.io/healthcheck-protocol: HTTP
    # Uncomment and set your certificate ARN
    # alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:region:account:certificate/xxxxx
spec:
  ingressClassName: alb
  rules:
    - host: code-server.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: oauth2-proxy
                port:
                  number: 4180
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: oauth2-proxy
  namespace: code-server
spec:
  minAvailable: 1
  selector:
    matchLabels:
      app: oauth2-proxy
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: oauth2-proxy
  namespace: code-server
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: oauth2-proxy
  minReplicas: 2
  maxReplicas: 5
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 75
    - type: Resource
      resource:
        name: memory
        target:
          type: Utilization
          averageUtilization: 80
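Rather than committing real credentials into the `Secret` above, one workable pattern (an assumption about workflow, not something these manifests require) is to delete the placeholder `Secret` from the file and create it imperatively before applying the rest. The environment variable names here are illustrative:

```bash
# Ensure the namespace exists, create the oauth2-proxy secret from literals,
# then apply the remaining manifests.
kubectl create namespace code-server --dry-run=client -o yaml | kubectl apply -f -
kubectl -n code-server create secret generic oauth2-proxy-secrets \
  --from-literal=client-id="$OAUTH2_CLIENT_ID" \
  --from-literal=client-secret="$OAUTH2_CLIENT_SECRET" \
  --from-literal=cookie-secret="$(python3 -c 'import os,base64; print(base64.urlsafe_b64encode(os.urandom(32)).decode())')"
kubectl apply -f oauth2-proxy.yaml
```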
245
terraform/deployments/eks/main.tf
Normal file
@@ -0,0 +1,245 @@
# EKS Deployment Configuration for Code-Server
# This file creates all necessary infrastructure to deploy code-server on EKS

terraform {
  required_version = ">= 1.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.0"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "~> 2.23"
    }
    helm = {
      source  = "hashicorp/helm"
      version = "~> 2.11"
    }
  }

  # Uncomment and configure for remote state storage
  # backend "s3" {
  #   bucket         = "your-terraform-state-bucket"
  #   key            = "code-server/eks/terraform.tfstate"
  #   region         = "us-east-1"
  #   encrypt        = true
  #   dynamodb_table = "terraform-state-lock"
  # }
}

provider "aws" {
  region = var.aws_region

  default_tags {
    tags = {
      Project     = "code-server"
      Environment = var.environment
      ManagedBy   = "Terraform"
      Deployment  = "EKS"
    }
  }
}

locals {
  name_prefix  = "${var.project_name}-${var.environment}"
  cluster_name = "${local.name_prefix}-eks"

  common_tags = {
    Project     = var.project_name
    Environment = var.environment
    ManagedBy   = "Terraform"
    Deployment  = "EKS"
  }
}

# VPC Module
module "vpc" {
  source = "../../modules/vpc"

  name_prefix              = local.name_prefix
  vpc_cidr                 = var.vpc_cidr
  public_subnet_cidrs      = var.public_subnet_cidrs
  private_subnet_cidrs     = var.private_subnet_cidrs
  aws_region               = var.aws_region
  cluster_name             = local.cluster_name
  enable_nat_gateway       = true
  single_nat_gateway       = var.single_nat_gateway
  enable_vpc_endpoints     = true
  enable_flow_logs         = true
  flow_logs_retention_days = 30

  tags = local.common_tags
}

# Security Module
module "security" {
  source = "../../modules/security"

  name_prefix             = local.name_prefix
  vpc_id                  = module.vpc.vpc_id
  allowed_cidr_blocks     = var.allowed_cidr_blocks
  ssh_allowed_cidr_blocks = var.ssh_allowed_cidr_blocks

  tags = local.common_tags
}

# EKS Module
module "eks" {
  source = "../../modules/eks"

  cluster_name              = local.cluster_name
  cluster_role_arn          = module.security.eks_cluster_iam_role_arn
  node_role_arn             = module.security.eks_nodes_iam_role_arn
  private_subnet_ids        = module.vpc.private_subnet_ids
  public_subnet_ids         = module.vpc.public_subnet_ids
  cluster_security_group_id = module.security.eks_cluster_security_group_id
  kms_key_arn               = module.security.kms_key_arn

  kubernetes_version     = var.kubernetes_version
  endpoint_public_access = var.endpoint_public_access
  public_access_cidrs    = var.public_access_cidrs

  node_instance_types = var.node_instance_types
  capacity_type       = var.capacity_type
  node_disk_size      = var.node_disk_size
  min_nodes           = var.min_nodes
  max_nodes           = var.max_nodes
  desired_nodes       = var.desired_nodes

  enable_ebs_csi_driver               = true
  enable_irsa                         = true
  enable_aws_load_balancer_controller = true

  tags = local.common_tags
}

# Configure Kubernetes provider
provider "kubernetes" {
  host                   = module.eks.cluster_endpoint
  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)

  exec {
    api_version = "client.authentication.k8s.io/v1beta1"
    command     = "aws"
    args = [
      "eks",
      "get-token",
      "--cluster-name",
      module.eks.cluster_id,
      "--region",
      var.aws_region
    ]
  }
}

# Configure Helm provider
provider "helm" {
  kubernetes {
    host                   = module.eks.cluster_endpoint
    cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)

    exec {
      api_version = "client.authentication.k8s.io/v1beta1"
      command     = "aws"
      args = [
        "eks",
        "get-token",
        "--cluster-name",
        module.eks.cluster_id,
        "--region",
        var.aws_region
      ]
    }
  }
}

# Install AWS Load Balancer Controller
resource "helm_release" "aws_load_balancer_controller" {
  name       = "aws-load-balancer-controller"
  repository = "https://aws.github.io/eks-charts"
  chart      = "aws-load-balancer-controller"
  namespace  = "kube-system"
  version    = "1.6.2"

  set {
    name  = "clusterName"
    value = module.eks.cluster_id
  }

  set {
    name  = "serviceAccount.create"
    value = "true"
  }

  set {
    name  = "serviceAccount.name"
    value = "aws-load-balancer-controller"
  }

  set {
    name  = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
    value = module.eks.aws_load_balancer_controller_role_arn
  }

  set {
    name  = "region"
    value = var.aws_region
  }

  set {
    name  = "vpcId"
    value = module.vpc.vpc_id
  }

  depends_on = [module.eks]
}

# Create namespace for code-server
resource "kubernetes_namespace" "code_server" {
  metadata {
    name = "code-server"
    labels = {
      name = "code-server"
    }
  }

  depends_on = [module.eks]
}

# Create secret for OAuth2 Proxy
resource "kubernetes_secret" "oauth2_proxy" {
  count = var.deploy_oauth2_proxy ? 1 : 0

  metadata {
    name      = "oauth2-proxy-secrets"
    namespace = kubernetes_namespace.code_server.metadata[0].name
  }

  data = {
    client-id     = var.oauth2_client_id
    client-secret = var.oauth2_client_secret
    cookie-secret = var.oauth2_cookie_secret
  }

  type = "Opaque"
}

# Storage Class for EBS GP3
resource "kubernetes_storage_class" "gp3" {
  metadata {
    name = "gp3"
  }

  storage_provisioner = "ebs.csi.aws.com"
  volume_binding_mode = "WaitForFirstConsumer"

  parameters = {
    type      = "gp3"
    encrypted = "true"
    kmsKeyId  = module.security.kms_key_arn
  }

  depends_on = [module.eks]
}
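After `terraform apply`, a quick smoke test that the controller and storage class from this file actually landed might look like the sketch below. The cluster name assumes the default `project_name`, `environment`, and `aws_region` values, which yield `code-server-dev-eks` in `us-east-1`:

```bash
# Point kubectl at the new cluster, then verify the controller rollout
# and the gp3 StorageClass created above.
aws eks update-kubeconfig --region us-east-1 --name code-server-dev-eks
kubectl -n kube-system rollout status deployment/aws-load-balancer-controller
kubectl get storageclass gp3
```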
110
terraform/deployments/eks/outputs.tf
Normal file
@@ -0,0 +1,110 @@
# EKS Deployment Outputs

output "vpc_id" {
  description = "ID of the VPC"
  value       = module.vpc.vpc_id
}

output "private_subnet_ids" {
  description = "IDs of private subnets"
  value       = module.vpc.private_subnet_ids
}

output "public_subnet_ids" {
  description = "IDs of public subnets"
  value       = module.vpc.public_subnet_ids
}

output "eks_cluster_id" {
  description = "ID of the EKS cluster"
  value       = module.eks.cluster_id
}

output "eks_cluster_endpoint" {
  description = "Endpoint of the EKS cluster"
  value       = module.eks.cluster_endpoint
}

output "eks_cluster_arn" {
  description = "ARN of the EKS cluster"
  value       = module.eks.cluster_arn
}

output "eks_cluster_oidc_issuer_url" {
  description = "OIDC issuer URL of the EKS cluster"
  value       = module.eks.cluster_oidc_issuer_url
}

output "kms_key_arn" {
  description = "ARN of the KMS key for encryption"
  value       = module.security.kms_key_arn
}

output "configure_kubectl" {
  description = "Command to configure kubectl"
  value       = "aws eks update-kubeconfig --region ${var.aws_region} --name ${module.eks.cluster_id}"
}

output "next_steps" {
  description = "Next steps to complete the setup"
  value       = <<-EOT

    Code-Server EKS Deployment Complete!

    Next Steps:

    1. Configure kubectl to access the cluster:
       aws eks update-kubeconfig --region ${var.aws_region} --name ${module.eks.cluster_id}
       kubectl get nodes  # Verify nodes are ready

    2. Deploy Code-Server using Helm:
       cd k8s
       # Edit code-server-values.yaml with your configuration
       helm upgrade --install code-server ../../ci/helm-chart \
         --namespace code-server \
         --create-namespace \
         --values code-server-values.yaml

    3. (Optional) Deploy OAuth2 Proxy for SAML authentication:
       # Edit k8s/oauth2-proxy.yaml with your SAML/OIDC configuration
       kubectl apply -f k8s/oauth2-proxy.yaml

    4. Get the Load Balancer URL:
       kubectl get ingress -n code-server
       # Wait for ADDRESS to be populated
       # The URL will be in the format: xxxxx.region.elb.amazonaws.com

    5. Configure DNS (if using a custom domain):
       # Create a CNAME record pointing to the ALB DNS name
       # Update the ingress configuration with your domain

    6. Monitor the deployment:
       kubectl get pods -n code-server
       kubectl logs -n code-server -l app.kubernetes.io/name=code-server
       kubectl describe ingress -n code-server

    Security Notes:
    - All worker nodes are in private subnets
    - EKS API endpoint is ${var.endpoint_public_access ? "public" : "private"}
    - Data is encrypted at rest using KMS
    - VPC Flow Logs are enabled for monitoring
    - IRSA (IAM Roles for Service Accounts) is enabled

    Useful Commands:
    - Scale pods: kubectl scale deployment code-server -n code-server --replicas=3
    - View logs: kubectl logs -n code-server -f deployment/code-server
    - Port forward (testing): kubectl port-forward -n code-server svc/code-server 8080:8080
  EOT
}
38
terraform/deployments/eks/terraform.tfvars.example
Normal file
@@ -0,0 +1,38 @@
# Example Terraform Variables for EKS Deployment
# Copy this file to terraform.tfvars and fill in your values

aws_region   = "us-east-1"
project_name = "code-server"
environment  = "dev"

# VPC Configuration
vpc_cidr             = "10.0.0.0/16"
public_subnet_cidrs  = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
private_subnet_cidrs = ["10.0.11.0/24", "10.0.12.0/24", "10.0.13.0/24"]
single_nat_gateway   = false # Set to true for cost optimization

# Security Configuration
allowed_cidr_blocks     = ["10.0.0.0/8"] # Restrict to private network
ssh_allowed_cidr_blocks = []             # No SSH access (use SSM instead)

# EKS Configuration
kubernetes_version     = "1.28"
endpoint_public_access = false         # Set to true if you need public API access
public_access_cidrs    = ["0.0.0.0/0"] # Restrict this if endpoint_public_access is true

# Node Group Configuration
node_instance_types = ["t3.medium"]
capacity_type       = "ON_DEMAND" # or "SPOT" for cost optimization
node_disk_size      = 50
min_nodes           = 1
max_nodes           = 3
desired_nodes       = 2

# OAuth2 / SAML Configuration
deploy_oauth2_proxy  = true
oauth2_client_id     = "your-client-id-from-idp"
oauth2_client_secret = "your-client-secret-from-idp"

# Generate cookie secret with:
# python -c 'import os,base64; print(base64.urlsafe_b64encode(os.urandom(32)).decode())'
oauth2_cookie_secret = "generate-random-secret-here"
139
terraform/deployments/eks/variables.tf
Normal file
@@ -0,0 +1,139 @@
# EKS Deployment Variables

variable "aws_region" {
  description = "AWS region for deployment"
  type        = string
  default     = "us-east-1"
}

variable "project_name" {
  description = "Project name"
  type        = string
  default     = "code-server"
}

variable "environment" {
  description = "Environment name (dev, staging, prod)"
  type        = string
  default     = "dev"
}

# VPC Configuration
variable "vpc_cidr" {
  description = "CIDR block for VPC"
  type        = string
  default     = "10.0.0.0/16"
}

variable "public_subnet_cidrs" {
  description = "CIDR blocks for public subnets"
  type        = list(string)
  default     = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
}

variable "private_subnet_cidrs" {
  description = "CIDR blocks for private subnets"
  type        = list(string)
  default     = ["10.0.11.0/24", "10.0.12.0/24", "10.0.13.0/24"]
}

variable "single_nat_gateway" {
  description = "Use a single NAT gateway (cost optimization)"
  type        = bool
  default     = false
}

# Security Configuration
variable "allowed_cidr_blocks" {
  description = "CIDR blocks allowed to access the ALB"
  type        = list(string)
  default     = ["10.0.0.0/8"] # Restrict to private network
}

variable "ssh_allowed_cidr_blocks" {
  description = "CIDR blocks allowed to SSH into nodes"
  type        = list(string)
  default     = [] # No SSH access by default
}

# EKS Configuration
variable "kubernetes_version" {
  description = "Kubernetes version for EKS cluster"
  type        = string
  default     = "1.28"
}

variable "endpoint_public_access" {
  description = "Enable public access to EKS API endpoint"
  type        = bool
  default     = false
}

variable "public_access_cidrs" {
  description = "CIDR blocks allowed to access EKS API endpoint"
  type        = list(string)
  default     = ["0.0.0.0/0"]
}

variable "node_instance_types" {
  description = "Instance types for EKS nodes"
  type        = list(string)
  default     = ["t3.medium"]
}

variable "capacity_type" {
  description = "Capacity type for EKS nodes (ON_DEMAND or SPOT)"
  type        = string
  default     = "ON_DEMAND"
}

variable "node_disk_size" {
  description = "Disk size for EKS nodes in GB"
  type        = number
  default     = 50
}

variable "min_nodes" {
  description = "Minimum number of EKS nodes"
  type        = number
  default     = 1
}

variable "max_nodes" {
  description = "Maximum number of EKS nodes"
  type        = number
  default     = 3
}

variable "desired_nodes" {
  description = "Desired number of EKS nodes"
  type        = number
  default     = 2
}

# OAuth2 / SAML Configuration
variable "deploy_oauth2_proxy" {
  description = "Deploy OAuth2 Proxy for authentication"
  type        = bool
  default     = true
}

variable "oauth2_client_id" {
  description = "OAuth2 client ID from your SAML/OIDC provider"
  type        = string
  default     = ""
}

variable "oauth2_client_secret" {
  description = "OAuth2 client secret from your SAML/OIDC provider"
  type        = string
  sensitive   = true
  default     = ""
}

variable "oauth2_cookie_secret" {
  description = "OAuth2 cookie secret (generate with: python -c 'import os,base64; print(base64.urlsafe_b64encode(os.urandom(32)).decode())')"
  type        = string
  sensitive   = true
  default     = ""
}
346
terraform/modules/ec2/main.tf
Normal file
@@ -0,0 +1,346 @@
# EC2 Module for Code-Server Deployment
# Deploys code-server on EC2 instances with Auto Scaling, ALB, and OAuth2 Proxy

# Get latest Amazon Linux 2023 AMI
data "aws_ami" "amazon_linux" {
  most_recent = true
  owners      = ["amazon"]

  filter {
    name   = "name"
    values = ["al2023-ami-*-x86_64"]
  }

  filter {
    name   = "virtualization-type"
    values = ["hvm"]
  }
}

# Generate random password for code-server if not provided
resource "random_password" "code_server" {
  count   = var.code_server_password == "" ? 1 : 0
  length  = 32
  special = true
}

# Store password in AWS Secrets Manager
resource "aws_secretsmanager_secret" "code_server_password" {
  name                    = "${var.name_prefix}-code-server-password"
  description             = "Code-Server password"
  recovery_window_in_days = 7

  tags = var.tags
}

resource "aws_secretsmanager_secret_version" "code_server_password" {
  secret_id     = aws_secretsmanager_secret.code_server_password.id
  secret_string = var.code_server_password != "" ? var.code_server_password : random_password.code_server[0].result
}

# User data script for EC2 instances
locals {
  user_data = templatefile("${path.module}/user-data.sh", {
    name_prefix          = var.name_prefix # referenced by the CloudWatch Agent config in user-data.sh
    code_server_version  = var.code_server_version
    region               = var.aws_region
    secret_name          = aws_secretsmanager_secret.code_server_password.name
    oauth2_client_id     = var.oauth2_client_id
    oauth2_client_secret = var.oauth2_client_secret
    oauth2_issuer_url    = var.oauth2_issuer_url
    oauth2_redirect_url  = var.oauth2_redirect_url
    cookie_secret        = var.oauth2_cookie_secret
    allowed_emails       = join(",", var.oauth2_allowed_emails)
  })
}

# Launch Template
resource "aws_launch_template" "code_server" {
  name_prefix   = "${var.name_prefix}-code-server-"
  image_id      = var.ami_id != "" ? var.ami_id : data.aws_ami.amazon_linux.id
  instance_type = var.instance_type

  iam_instance_profile {
    name = var.iam_instance_profile_name
  }

  vpc_security_group_ids = [var.security_group_id]

  user_data = base64encode(local.user_data)

  block_device_mappings {
    device_name = "/dev/xvda"

    ebs {
      volume_size           = var.ebs_volume_size
      volume_type           = var.ebs_volume_type
      encrypted             = true
      kms_key_id            = var.kms_key_arn
      delete_on_termination = true
    }
  }

  metadata_options {
    http_endpoint               = "enabled"
    http_tokens                 = "required"
    http_put_response_hop_limit = 1
    instance_metadata_tags      = "enabled"
  }

  monitoring {
    enabled = true
  }

  tag_specifications {
    resource_type = "instance"
    tags = merge(
      var.tags,
      {
        Name = "${var.name_prefix}-code-server"
      }
    )
  }

  tag_specifications {
    resource_type = "volume"
    tags = merge(
      var.tags,
      {
        Name = "${var.name_prefix}-code-server-volume"
      }
    )
  }

  lifecycle {
    create_before_destroy = true
  }

  tags = var.tags
}

# Auto Scaling Group
resource "aws_autoscaling_group" "code_server" {
  name                      = "${var.name_prefix}-code-server-asg"
  vpc_zone_identifier       = var.subnet_ids
  target_group_arns         = [aws_lb_target_group.code_server.arn, aws_lb_target_group.oauth2_proxy.arn]
  health_check_type         = "ELB"
  health_check_grace_period = 300
  min_size                  = var.min_instances
  max_size                  = var.max_instances
  desired_capacity          = var.desired_instances

  launch_template {
    id      = aws_launch_template.code_server.id
    version = "$Latest"
  }

  tag {
    key                 = "Name"
    value               = "${var.name_prefix}-code-server"
    propagate_at_launch = true
  }

  dynamic "tag" {
    for_each = var.tags
    content {
      key                 = tag.key
      value               = tag.value
      propagate_at_launch = true
    }
  }

  lifecycle {
    create_before_destroy = true
    ignore_changes        = [desired_capacity]
  }
}

# Application Load Balancer
resource "aws_lb" "code_server" {
  name               = "${var.name_prefix}-code-server-alb"
  internal           = var.internal_alb
  load_balancer_type = "application"
  security_groups    = [var.alb_security_group_id]
  subnets            = var.alb_subnet_ids

  enable_deletion_protection       = var.enable_deletion_protection
  enable_http2                     = true
  enable_cross_zone_load_balancing = true

  tags = merge(
    var.tags,
    {
      Name = "${var.name_prefix}-code-server-alb"
    }
  )
}

# Target Group for OAuth2 Proxy
resource "aws_lb_target_group" "oauth2_proxy" {
  name     = "${var.name_prefix}-oauth2-tg"
  port     = 4180
  protocol = "HTTP"
  vpc_id   = var.vpc_id

  health_check {
    enabled             = true
    healthy_threshold   = 2
    unhealthy_threshold = 2
    timeout             = 5
    interval            = 30
    path                = "/ping"
    matcher             = "200"
  }

  deregistration_delay = 30

  tags = merge(
    var.tags,
    {
      Name = "${var.name_prefix}-oauth2-tg"
    }
  )
}

# Target Group for Code-Server
resource "aws_lb_target_group" "code_server" {
  name     = "${var.name_prefix}-code-server-tg"
  port     = 8080
  protocol = "HTTP"
  vpc_id   = var.vpc_id

  health_check {
    enabled             = true
    healthy_threshold   = 2
    unhealthy_threshold = 2
    timeout             = 5
    interval            = 30
    path                = "/healthz"
    matcher             = "200"
  }

  deregistration_delay = 30

  stickiness {
    type            = "lb_cookie"
    cookie_duration = 86400
    enabled         = true
  }

  tags = merge(
    var.tags,
    {
      Name = "${var.name_prefix}-code-server-tg"
    }
  )
}

# HTTPS Listener (primary)
resource "aws_lb_listener" "https" {
  count             = var.certificate_arn != "" ? 1 : 0
  load_balancer_arn = aws_lb.code_server.arn
  port              = "443"
  protocol          = "HTTPS"
  ssl_policy        = "ELBSecurityPolicy-TLS-1-2-2017-01"
  certificate_arn   = var.certificate_arn

  default_action {
    type             = "forward"
    target_group_arn = aws_lb_target_group.oauth2_proxy.arn
  }

  tags = var.tags
}

# HTTP Listener (redirects to HTTPS when a certificate is configured)
resource "aws_lb_listener" "http" {
  load_balancer_arn = aws_lb.code_server.arn
  port              = "80"
  protocol          = "HTTP"

  default_action {
    type = var.certificate_arn != "" ? "redirect" : "forward"

    dynamic "redirect" {
      for_each = var.certificate_arn != "" ? [1] : []
      content {
        port        = "443"
        protocol    = "HTTPS"
        status_code = "HTTP_301"
      }
    }

    target_group_arn = var.certificate_arn == "" ? aws_lb_target_group.oauth2_proxy.arn : null
  }

  tags = var.tags
}

# CloudWatch Log Group for Code-Server
resource "aws_cloudwatch_log_group" "code_server" {
  name              = "/aws/ec2/${var.name_prefix}-code-server"
  retention_in_days = var.log_retention_days

  tags = var.tags
}

# Auto Scaling Policies
resource "aws_autoscaling_policy" "scale_up" {
  count                  = var.enable_autoscaling ? 1 : 0
  name                   = "${var.name_prefix}-code-server-scale-up"
  autoscaling_group_name = aws_autoscaling_group.code_server.name
  adjustment_type        = "ChangeInCapacity"
  scaling_adjustment     = 1
  cooldown               = 300
}

resource "aws_autoscaling_policy" "scale_down" {
  count                  = var.enable_autoscaling ? 1 : 0
  name                   = "${var.name_prefix}-code-server-scale-down"
  autoscaling_group_name = aws_autoscaling_group.code_server.name
  adjustment_type        = "ChangeInCapacity"
  scaling_adjustment     = -1
  cooldown               = 300
}

# CloudWatch Alarms for Auto Scaling
resource "aws_cloudwatch_metric_alarm" "cpu_high" {
  count               = var.enable_autoscaling ? 1 : 0
  alarm_name          = "${var.name_prefix}-code-server-cpu-high"
  comparison_operator = "GreaterThanThreshold"
  evaluation_periods  = "2"
  metric_name         = "CPUUtilization"
  namespace           = "AWS/EC2"
  period              = "300"
  statistic           = "Average"
  threshold           = "80"

  dimensions = {
    AutoScalingGroupName = aws_autoscaling_group.code_server.name
  }

  alarm_description = "Scales up when average CPU utilization exceeds 80%"
  alarm_actions     = [aws_autoscaling_policy.scale_up[0].arn]

  tags = var.tags
}

resource "aws_cloudwatch_metric_alarm" "cpu_low" {
  count               = var.enable_autoscaling ? 1 : 0
  alarm_name          = "${var.name_prefix}-code-server-cpu-low"
  comparison_operator = "LessThanThreshold"
  evaluation_periods  = "2"
  metric_name         = "CPUUtilization"
  namespace           = "AWS/EC2"
  period              = "300"
  statistic           = "Average"
  threshold           = "20"

  dimensions = {
    AutoScalingGroupName = aws_autoscaling_group.code_server.name
  }

  alarm_description = "Scales down when average CPU utilization drops below 20%"
  alarm_actions     = [aws_autoscaling_policy.scale_down[0].arn]

  tags = var.tags
}
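One way to confirm after apply that the two alarms are actually wired to their scaling policies is to query them directly; the alarm names below assume `name_prefix = "code-server-dev"`, which follows the deployment defaults:

```bash
# List the CPU alarms and the scaling-policy ARNs they trigger.
aws cloudwatch describe-alarms \
  --alarm-names code-server-dev-code-server-cpu-high code-server-dev-code-server-cpu-low \
  --query 'MetricAlarms[].{Name:AlarmName,Actions:AlarmActions}'
```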
41
terraform/modules/ec2/outputs.tf
Normal file
@@ -0,0 +1,41 @@
# EC2 Module Outputs

output "alb_dns_name" {
  description = "DNS name of the Application Load Balancer"
  value       = aws_lb.code_server.dns_name
}

output "alb_arn" {
  description = "ARN of the Application Load Balancer"
  value       = aws_lb.code_server.arn
}

output "alb_zone_id" {
  description = "Zone ID of the Application Load Balancer"
  value       = aws_lb.code_server.zone_id
}

output "autoscaling_group_name" {
  description = "Name of the Auto Scaling Group"
  value       = aws_autoscaling_group.code_server.name
}

output "autoscaling_group_arn" {
  description = "ARN of the Auto Scaling Group"
  value       = aws_autoscaling_group.code_server.arn
}

output "launch_template_id" {
  description = "ID of the Launch Template"
  value       = aws_launch_template.code_server.id
}

output "code_server_password_secret_arn" {
  description = "ARN of the Secrets Manager secret containing the code-server password"
  value       = aws_secretsmanager_secret.code_server_password.arn
}

output "cloudwatch_log_group_name" {
  description = "Name of the CloudWatch Log Group"
  value       = aws_cloudwatch_log_group.code_server.name
}
212
terraform/modules/ec2/user-data.sh
Normal file
@@ -0,0 +1,212 @@
#!/bin/bash
# User data script for Code-Server EC2 instances
# This script installs code-server and oauth2-proxy and configures both.
# Note: this file is rendered with Terraform's templatefile(); names like
# name_prefix, region, and secret_name are template variables passed in
# from modules/ec2/main.tf.

set -e

# Update system
yum update -y

# Install dependencies
yum install -y docker git wget curl jq

# Start and enable Docker
systemctl start docker
systemctl enable docker

# Add ec2-user to docker group
usermod -aG docker ec2-user

# Install CloudWatch Agent
wget https://s3.amazonaws.com/amazoncloudwatch-agent/amazon_linux/amd64/latest/amazon-cloudwatch-agent.rpm
rpm -U ./amazon-cloudwatch-agent.rpm
rm -f ./amazon-cloudwatch-agent.rpm

# Configure CloudWatch Agent (log group name matches modules/ec2/main.tf)
cat > /opt/aws/amazon-cloudwatch-agent/etc/config.json <<EOF
{
  "logs": {
    "logs_collected": {
      "files": {
        "collect_list": [
          {
            "file_path": "/var/log/code-server.log",
            "log_group_name": "/aws/ec2/${name_prefix}-code-server",
            "log_stream_name": "{instance_id}/code-server"
          },
          {
            "file_path": "/var/log/oauth2-proxy.log",
            "log_group_name": "/aws/ec2/${name_prefix}-code-server",
            "log_stream_name": "{instance_id}/oauth2-proxy"
          }
        ]
      }
    }
  },
  "metrics": {
    "namespace": "CodeServer",
    "metrics_collected": {
      "cpu": {
        "measurement": [
          {"name": "cpu_usage_idle", "rename": "CPU_IDLE", "unit": "Percent"},
          {"name": "cpu_usage_iowait", "rename": "CPU_IOWAIT", "unit": "Percent"},
          "cpu_time_guest"
        ],
        "metrics_collection_interval": 60,
        "totalcpu": false
      },
      "disk": {
        "measurement": [
          {"name": "used_percent", "rename": "DISK_USED", "unit": "Percent"}
        ],
        "metrics_collection_interval": 60,
        "resources": [
          "*"
        ]
      },
      "mem": {
        "measurement": [
          {"name": "mem_used_percent", "rename": "MEM_USED", "unit": "Percent"}
        ],
        "metrics_collection_interval": 60
      }
    }
  }
}
EOF

# Start CloudWatch Agent
/opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl \
  -a fetch-config \
  -m ec2 \
  -s \
  -c file:/opt/aws/amazon-cloudwatch-agent/etc/config.json

# Get code-server password from Secrets Manager
CODE_SERVER_PASSWORD=$(aws secretsmanager get-secret-value \
  --secret-id ${secret_name} \
  --region ${region} \
  --query SecretString \
  --output text)

# Create docker-compose configuration
mkdir -p /opt/code-server
cat > /opt/code-server/docker-compose.yml <<EOF
version: "3.8"

services:
  code-server:
    image: codercom/code-server:${code_server_version}
    container_name: code-server
    restart: unless-stopped
    ports:
      - "8080:8080"
    volumes:
      - /home/ec2-user/workspace:/home/coder/workspace
      - /home/ec2-user/.config:/home/coder/.config
    environment:
      - PASSWORD=$CODE_SERVER_PASSWORD
      - SUDO_PASSWORD=$CODE_SERVER_PASSWORD
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

  oauth2-proxy:
    image: quay.io/oauth2-proxy/oauth2-proxy:latest
    container_name: oauth2-proxy
    restart: unless-stopped
    ports:
      - "4180:4180"
    command:
      - --provider=oidc
      - --email-domain=*
      - --upstream=http://code-server:8080
      - --http-address=0.0.0.0:4180
      - --redirect-url=${oauth2_redirect_url}
      - --oidc-issuer-url=${oauth2_issuer_url}
      - --cookie-secret=${cookie_secret}
      - --cookie-secure=true
      - --cookie-httponly=true
      - --cookie-samesite=lax
      - --set-xauthrequest=true
      - --pass-access-token=true
      - --pass-authorization-header=true
      - --set-authorization-header=true
      - --skip-provider-button=false
%{ if allowed_emails != "" ~}
      - --authenticated-emails-file=/etc/oauth2-proxy/emails.txt
%{ endif ~}
    environment:
      - OAUTH2_PROXY_CLIENT_ID=${oauth2_client_id}
      - OAUTH2_PROXY_CLIENT_SECRET=${oauth2_client_secret}
%{ if allowed_emails != "" ~}
    volumes:
      - /opt/code-server/allowed-emails.txt:/etc/oauth2-proxy/emails.txt:ro
%{ endif ~}
    depends_on:
      - code-server
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"
EOF

%{ if allowed_emails != "" ~}
# Create allowed emails file (one address per line)
cat > /opt/code-server/allowed-emails.txt <<EOF
$(echo "${allowed_emails}" | tr ',' '\n')
EOF
%{ endif ~}

# Create workspace directory
mkdir -p /home/ec2-user/workspace
mkdir -p /home/ec2-user/.config
chown -R 1000:1000 /home/ec2-user/workspace /home/ec2-user/.config

# Install docker-compose
curl -L "https://github.com/docker/compose/releases/latest/download/docker-compose-$(uname -s)-$(uname -m)" \
  -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose

# Start services
cd /opt/code-server
docker-compose up -d

# Create systemd service so the docker-compose stack survives reboots
cat > /etc/systemd/system/code-server.service <<EOF
[Unit]
Description=Code-Server Docker Compose
Requires=docker.service
After=docker.service

[Service]
Type=oneshot
RemainAfterExit=yes
WorkingDirectory=/opt/code-server
ExecStart=/usr/local/bin/docker-compose up -d
ExecStop=/usr/local/bin/docker-compose down
TimeoutStartSec=0

[Install]
WantedBy=multi-user.target
EOF

# Enable and start the service
systemctl daemon-reload
systemctl enable code-server.service

# Create log files
touch /var/log/code-server.log
touch /var/log/oauth2-proxy.log
chmod 644 /var/log/code-server.log /var/log/oauth2-proxy.log

# Forward container logs into the files the CloudWatch Agent tails
cat > /etc/cron.d/code-server-logs <<EOF
* * * * * root docker logs code-server --tail 100 >> /var/log/code-server.log 2>&1
* * * * * root docker logs oauth2-proxy --tail 100 >> /var/log/oauth2-proxy.log 2>&1
EOF

echo "Code-Server installation completed successfully!"
171
terraform/modules/ec2/variables.tf
Normal file
@@ -0,0 +1,171 @@
# EC2 Module Variables

variable "name_prefix" {
  description = "Prefix for resource names"
  type        = string
}

variable "vpc_id" {
  description = "ID of the VPC"
  type        = string
}

variable "subnet_ids" {
  description = "List of subnet IDs for EC2 instances"
  type        = list(string)
}

variable "alb_subnet_ids" {
  description = "List of subnet IDs for ALB"
  type        = list(string)
}

variable "security_group_id" {
  description = "Security group ID for EC2 instances"
  type        = string
}

variable "alb_security_group_id" {
  description = "Security group ID for ALB"
  type        = string
}

variable "iam_instance_profile_name" {
  description = "IAM instance profile name for EC2 instances"
  type        = string
}

variable "kms_key_arn" {
  description = "ARN of KMS key for EBS encryption"
  type        = string
}

variable "aws_region" {
  description = "AWS region"
  type        = string
}

variable "instance_type" {
  description = "EC2 instance type"
  type        = string
  default     = "t3.medium"
}

variable "ami_id" {
  description = "AMI ID for EC2 instances (leave empty for latest Amazon Linux 2023)"
  type        = string
  default     = ""
}

variable "ebs_volume_size" {
  description = "Size of EBS volume in GB"
  type        = number
  default     = 50
}

variable "ebs_volume_type" {
  description = "Type of EBS volume"
  type        = string
  default     = "gp3"
}

variable "min_instances" {
  description = "Minimum number of instances in ASG"
  type        = number
  default     = 1
}

variable "max_instances" {
  description = "Maximum number of instances in ASG"
  type        = number
  default     = 3
}

variable "desired_instances" {
  description = "Desired number of instances in ASG"
  type        = number
  default     = 1
}

variable "code_server_version" {
  description = "Version of code-server to install"
  type        = string
  default     = "latest"
}

variable "code_server_password" {
  description = "Password for code-server (leave empty for auto-generated)"
  type        = string
  default     = ""
  sensitive   = true
}

variable "certificate_arn" {
  description = "ARN of ACM certificate for HTTPS"
  type        = string
  default     = ""
}

variable "internal_alb" {
  description = "Whether the ALB should be internal"
  type        = bool
  default     = true
}

variable "enable_deletion_protection" {
  description = "Enable deletion protection for ALB"
  type        = bool
  default     = false
}

variable "log_retention_days" {
  description = "Number of days to retain CloudWatch logs"
  type        = number
  default     = 30
}

variable "enable_autoscaling" {
  description = "Enable auto scaling based on CPU metrics"
  type        = bool
  default     = true
}

# OAuth2 Proxy variables
variable "oauth2_client_id" {
  description = "OAuth2 client ID"
  type        = string
}

variable "oauth2_client_secret" {
  description = "OAuth2 client secret"
  type        = string
  sensitive   = true
}

variable "oauth2_issuer_url" {
  description = "OAuth2 issuer URL (OIDC/SAML endpoint)"
  type        = string
}

variable "oauth2_redirect_url" {
  description = "OAuth2 redirect URL"
  type        = string
}

variable "oauth2_cookie_secret" {
  description = "OAuth2 cookie secret (generate with: python -c 'import os,base64; print(base64.urlsafe_b64encode(os.urandom(32)).decode())')"
  type        = string
  sensitive   = true
}

variable "oauth2_allowed_emails" {
  description = "List of allowed email addresses for OAuth2"
  type        = list(string)
  default     = []
}

variable "tags" {
  description = "Tags to apply to resources"
  type        = map(string)
  default     = {}
}
@@ -0,0 +1,241 @@
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "iam:CreateServiceLinkedRole"
      ],
      "Resource": "*",
      "Condition": {
        "StringEquals": {
          "iam:AWSServiceName": "elasticloadbalancing.amazonaws.com"
        }
      }
    },
    {
      "Effect": "Allow",
      "Action": [
        "ec2:DescribeAccountAttributes",
        "ec2:DescribeAddresses",
        "ec2:DescribeAvailabilityZones",
        "ec2:DescribeInternetGateways",
        "ec2:DescribeVpcs",
        "ec2:DescribeVpcPeeringConnections",
        "ec2:DescribeSubnets",
        "ec2:DescribeSecurityGroups",
        "ec2:DescribeInstances",
        "ec2:DescribeNetworkInterfaces",
        "ec2:DescribeTags",
        "ec2:GetCoipPoolUsage",
        "ec2:DescribeCoipPools",
        "elasticloadbalancing:DescribeLoadBalancers",
        "elasticloadbalancing:DescribeLoadBalancerAttributes",
        "elasticloadbalancing:DescribeListeners",
        "elasticloadbalancing:DescribeListenerCertificates",
        "elasticloadbalancing:DescribeSSLPolicies",
        "elasticloadbalancing:DescribeRules",
        "elasticloadbalancing:DescribeTargetGroups",
        "elasticloadbalancing:DescribeTargetGroupAttributes",
        "elasticloadbalancing:DescribeTargetHealth",
        "elasticloadbalancing:DescribeTags"
      ],
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": [
        "cognito-idp:DescribeUserPoolClient",
        "acm:ListCertificates",
        "acm:DescribeCertificate",
        "iam:ListServerCertificates",
        "iam:GetServerCertificate",
        "waf-regional:GetWebACL",
        "waf-regional:GetWebACLForResource",
        "waf-regional:AssociateWebACL",
        "waf-regional:DisassociateWebACL",
        "wafv2:GetWebACL",
        "wafv2:GetWebACLForResource",
        "wafv2:AssociateWebACL",
        "wafv2:DisassociateWebACL",
        "shield:GetSubscriptionState",
        "shield:DescribeProtection",
        "shield:CreateProtection",
        "shield:DeleteProtection"
      ],
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": [
        "ec2:AuthorizeSecurityGroupIngress",
        "ec2:RevokeSecurityGroupIngress"
      ],
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": [
        "ec2:CreateSecurityGroup"
      ],
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": [
        "ec2:CreateTags"
      ],
      "Resource": "arn:aws:ec2:*:*:security-group/*",
      "Condition": {
        "StringEquals": {
          "ec2:CreateAction": "CreateSecurityGroup"
        },
        "Null": {
          "aws:RequestTag/elbv2.k8s.aws/cluster": "false"
        }
      }
    },
    {
      "Effect": "Allow",
      "Action": [
        "ec2:CreateTags",
        "ec2:DeleteTags"
      ],
      "Resource": "arn:aws:ec2:*:*:security-group/*",
      "Condition": {
        "Null": {
          "aws:RequestTag/elbv2.k8s.aws/cluster": "true",
          "aws:ResourceTag/elbv2.k8s.aws/cluster": "false"
        }
      }
    },
    {
      "Effect": "Allow",
      "Action": [
        "ec2:AuthorizeSecurityGroupIngress",
        "ec2:RevokeSecurityGroupIngress",
        "ec2:DeleteSecurityGroup"
      ],
      "Resource": "*",
      "Condition": {
        "Null": {
          "aws:ResourceTag/elbv2.k8s.aws/cluster": "false"
        }
      }
    },
    {
      "Effect": "Allow",
      "Action": [
        "elasticloadbalancing:CreateLoadBalancer",
        "elasticloadbalancing:CreateTargetGroup"
      ],
      "Resource": "*",
      "Condition": {
        "Null": {
          "aws:RequestTag/elbv2.k8s.aws/cluster": "false"
        }
      }
    },
    {
      "Effect": "Allow",
      "Action": [
        "elasticloadbalancing:CreateListener",
        "elasticloadbalancing:DeleteListener",
        "elasticloadbalancing:CreateRule",
        "elasticloadbalancing:DeleteRule"
      ],
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": [
        "elasticloadbalancing:AddTags",
        "elasticloadbalancing:RemoveTags"
      ],
      "Resource": [
        "arn:aws:elasticloadbalancing:*:*:targetgroup/*/*",
        "arn:aws:elasticloadbalancing:*:*:loadbalancer/net/*/*",
        "arn:aws:elasticloadbalancing:*:*:loadbalancer/app/*/*"
      ],
      "Condition": {
        "Null": {
          "aws:RequestTag/elbv2.k8s.aws/cluster": "true",
          "aws:ResourceTag/elbv2.k8s.aws/cluster": "false"
        }
      }
    },
    {
      "Effect": "Allow",
      "Action": [
        "elasticloadbalancing:AddTags",
        "elasticloadbalancing:RemoveTags"
      ],
      "Resource": [
        "arn:aws:elasticloadbalancing:*:*:listener/net/*/*/*",
        "arn:aws:elasticloadbalancing:*:*:listener/app/*/*/*",
        "arn:aws:elasticloadbalancing:*:*:listener-rule/net/*/*/*",
        "arn:aws:elasticloadbalancing:*:*:listener-rule/app/*/*/*"
      ]
    },
    {
      "Effect": "Allow",
      "Action": [
        "elasticloadbalancing:ModifyLoadBalancerAttributes",
        "elasticloadbalancing:SetIpAddressType",
        "elasticloadbalancing:SetSecurityGroups",
        "elasticloadbalancing:SetSubnets",
        "elasticloadbalancing:DeleteLoadBalancer",
        "elasticloadbalancing:ModifyTargetGroup",
        "elasticloadbalancing:ModifyTargetGroupAttributes",
        "elasticloadbalancing:DeleteTargetGroup"
      ],
      "Resource": "*",
      "Condition": {
        "Null": {
          "aws:ResourceTag/elbv2.k8s.aws/cluster": "false"
        }
      }
    },
    {
      "Effect": "Allow",
      "Action": [
        "elasticloadbalancing:AddTags"
      ],
      "Resource": [
        "arn:aws:elasticloadbalancing:*:*:targetgroup/*/*",
        "arn:aws:elasticloadbalancing:*:*:loadbalancer/net/*/*",
        "arn:aws:elasticloadbalancing:*:*:loadbalancer/app/*/*"
      ],
      "Condition": {
        "StringEquals": {
          "elasticloadbalancing:CreateAction": [
            "CreateTargetGroup",
            "CreateLoadBalancer"
          ]
        },
        "Null": {
          "aws:RequestTag/elbv2.k8s.aws/cluster": "false"
        }
      }
    },
    {
      "Effect": "Allow",
      "Action": [
        "elasticloadbalancing:RegisterTargets",
        "elasticloadbalancing:DeregisterTargets"
      ],
      "Resource": "arn:aws:elasticloadbalancing:*:*:targetgroup/*/*"
    },
    {
      "Effect": "Allow",
      "Action": [
        "elasticloadbalancing:SetWebAcl",
        "elasticloadbalancing:ModifyListener",
        "elasticloadbalancing:AddListenerCertificates",
        "elasticloadbalancing:RemoveListenerCertificates",
        "elasticloadbalancing:ModifyRule"
      ],
      "Resource": "*"
    }
  ]
}
|
||||
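This JSON file is the standard AWS Load Balancer Controller policy; the `aws_iam_policy` resource in `modules/eks` loads it with `file()`. The deployment scripts install charts with the Helm CLI, but an equivalent Terraform-managed install could look like the sketch below. The `module.eks` address and chart values are assumptions for illustration, not part of this commit; the service account name must match the `system:serviceaccount:kube-system:aws-load-balancer-controller` subject in the IRSA trust policy.

```hcl
# Sketch: install the AWS Load Balancer Controller with the Terraform Helm
# provider, bound to the IRSA role created in modules/eks. Assumes the helm
# provider is already configured against the new cluster.
resource "helm_release" "aws_load_balancer_controller" {
  name       = "aws-load-balancer-controller"
  repository = "https://aws.github.io/eks-charts"
  chart      = "aws-load-balancer-controller"
  namespace  = "kube-system"

  set {
    name  = "clusterName"
    value = module.eks.cluster_id # assumed module address
  }
  set {
    name  = "serviceAccount.name"
    value = "aws-load-balancer-controller" # must match the IRSA trust condition
  }
  set {
    name  = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
    value = module.eks.aws_load_balancer_controller_role_arn
  }
}
```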
232
terraform/modules/eks/main.tf
Normal file

@@ -0,0 +1,232 @@
# EKS Module for Code-Server Deployment
# Creates an EKS cluster with managed node groups in private subnets

# EKS Cluster
resource "aws_eks_cluster" "main" {
  name     = var.cluster_name
  role_arn = var.cluster_role_arn
  version  = var.kubernetes_version

  vpc_config {
    subnet_ids              = concat(var.private_subnet_ids, var.public_subnet_ids)
    endpoint_private_access = true
    endpoint_public_access  = var.endpoint_public_access
    public_access_cidrs     = var.endpoint_public_access ? var.public_access_cidrs : []
    security_group_ids      = [var.cluster_security_group_id]
  }

  encryption_config {
    provider {
      key_arn = var.kms_key_arn
    }
    resources = ["secrets"]
  }

  enabled_cluster_log_types = var.cluster_log_types

  tags = merge(
    var.tags,
    {
      Name = var.cluster_name
    }
  )

  # Create the log group before the cluster; otherwise EKS creates it
  # implicitly with default retention and the aws_cloudwatch_log_group
  # resource below conflicts with it. (depends_on cannot reference input
  # variables; ordering on the cluster role is carried by role_arn.)
  depends_on = [
    aws_cloudwatch_log_group.eks_cluster
  ]
}

# EKS Cluster Addons
resource "aws_eks_addon" "vpc_cni" {
  cluster_name                = aws_eks_cluster.main.name
  addon_name                  = "vpc-cni"
  addon_version               = var.vpc_cni_version
  resolve_conflicts_on_create = "OVERWRITE"
  resolve_conflicts_on_update = "PRESERVE"

  tags = var.tags
}

resource "aws_eks_addon" "kube_proxy" {
  cluster_name                = aws_eks_cluster.main.name
  addon_name                  = "kube-proxy"
  addon_version               = var.kube_proxy_version
  resolve_conflicts_on_create = "OVERWRITE"
  resolve_conflicts_on_update = "PRESERVE"

  tags = var.tags
}

resource "aws_eks_addon" "coredns" {
  cluster_name                = aws_eks_cluster.main.name
  addon_name                  = "coredns"
  addon_version               = var.coredns_version
  resolve_conflicts_on_create = "OVERWRITE"
  resolve_conflicts_on_update = "PRESERVE"

  tags = var.tags

  depends_on = [
    aws_eks_node_group.main
  ]
}

resource "aws_eks_addon" "ebs_csi_driver" {
  # Also requires enable_irsa = true: the role below trusts the cluster's
  # OIDC provider.
  count                       = var.enable_ebs_csi_driver ? 1 : 0
  cluster_name                = aws_eks_cluster.main.name
  addon_name                  = "aws-ebs-csi-driver"
  addon_version               = var.ebs_csi_driver_version
  resolve_conflicts_on_create = "OVERWRITE"
  resolve_conflicts_on_update = "PRESERVE"
  service_account_role_arn    = aws_iam_role.ebs_csi_driver[0].arn

  tags = var.tags
}

# IAM Role for EBS CSI Driver
resource "aws_iam_role" "ebs_csi_driver" {
  count = var.enable_ebs_csi_driver ? 1 : 0
  name  = "${var.cluster_name}-ebs-csi-driver-role"

  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Effect = "Allow"
        Principal = {
          Federated = aws_iam_openid_connect_provider.eks[0].arn
        }
        Action = "sts:AssumeRoleWithWebIdentity"
        Condition = {
          StringEquals = {
            "${replace(aws_iam_openid_connect_provider.eks[0].url, "https://", "")}:sub" = "system:serviceaccount:kube-system:ebs-csi-controller-sa"
            "${replace(aws_iam_openid_connect_provider.eks[0].url, "https://", "")}:aud" = "sts.amazonaws.com"
          }
        }
      }
    ]
  })

  tags = var.tags
}

resource "aws_iam_role_policy_attachment" "ebs_csi_driver" {
  count      = var.enable_ebs_csi_driver ? 1 : 0
  role       = aws_iam_role.ebs_csi_driver[0].name
  policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy"
}

# OIDC Provider for EKS (IRSA)
data "tls_certificate" "eks" {
  url = aws_eks_cluster.main.identity[0].oidc[0].issuer
}

resource "aws_iam_openid_connect_provider" "eks" {
  count           = var.enable_irsa ? 1 : 0
  client_id_list  = ["sts.amazonaws.com"]
  thumbprint_list = [data.tls_certificate.eks.certificates[0].sha1_fingerprint]
  url             = aws_eks_cluster.main.identity[0].oidc[0].issuer

  tags = var.tags
}

# EKS Node Group
resource "aws_eks_node_group" "main" {
  cluster_name    = aws_eks_cluster.main.name
  node_group_name = "${var.cluster_name}-node-group"
  node_role_arn   = var.node_role_arn
  subnet_ids      = var.private_subnet_ids
  version         = var.kubernetes_version

  scaling_config {
    desired_size = var.desired_nodes
    max_size     = var.max_nodes
    min_size     = var.min_nodes
  }

  update_config {
    max_unavailable = 1
  }

  instance_types = var.node_instance_types
  capacity_type  = var.capacity_type
  disk_size      = var.node_disk_size

  labels = var.node_labels

  dynamic "taint" {
    for_each = var.node_taints
    content {
      key    = taint.value.key
      value  = taint.value.value
      effect = taint.value.effect
    }
  }

  tags = merge(
    var.tags,
    {
      Name = "${var.cluster_name}-node-group"
    }
  )

  lifecycle {
    create_before_destroy = true
    # Let cluster autoscaling manage the desired size after creation.
    ignore_changes = [scaling_config[0].desired_size]
  }
}

# CloudWatch Log Group for EKS
resource "aws_cloudwatch_log_group" "eks_cluster" {
  name              = "/aws/eks/${var.cluster_name}/cluster"
  retention_in_days = var.log_retention_days

  tags = var.tags
}

# IAM Role for AWS Load Balancer Controller
resource "aws_iam_role" "aws_load_balancer_controller" {
  # Requires enable_irsa = true so the OIDC provider above exists.
  count = var.enable_aws_load_balancer_controller ? 1 : 0
  name  = "${var.cluster_name}-aws-load-balancer-controller"

  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Effect = "Allow"
        Principal = {
          Federated = var.enable_irsa ? aws_iam_openid_connect_provider.eks[0].arn : null
        }
        Action = "sts:AssumeRoleWithWebIdentity"
        Condition = {
          StringEquals = var.enable_irsa ? {
            "${replace(aws_iam_openid_connect_provider.eks[0].url, "https://", "")}:sub" = "system:serviceaccount:kube-system:aws-load-balancer-controller"
            "${replace(aws_iam_openid_connect_provider.eks[0].url, "https://", "")}:aud" = "sts.amazonaws.com"
          } : {}
        }
      }
    ]
  })

  tags = var.tags
}

resource "aws_iam_policy" "aws_load_balancer_controller" {
  count       = var.enable_aws_load_balancer_controller ? 1 : 0
  name        = "${var.cluster_name}-AWSLoadBalancerControllerIAMPolicy"
  description = "IAM policy for AWS Load Balancer Controller"

  policy = file("${path.module}/iam-policy-aws-load-balancer-controller.json")

  tags = var.tags
}

resource "aws_iam_role_policy_attachment" "aws_load_balancer_controller" {
  count      = var.enable_aws_load_balancer_controller ? 1 : 0
  role       = aws_iam_role.aws_load_balancer_controller[0].name
  policy_arn = aws_iam_policy.aws_load_balancer_controller[0].arn
}
67
terraform/modules/eks/outputs.tf
Normal file

@@ -0,0 +1,67 @@
# EKS Module Outputs

output "cluster_id" {
  description = "ID of the EKS cluster"
  value       = aws_eks_cluster.main.id
}

output "cluster_arn" {
  description = "ARN of the EKS cluster"
  value       = aws_eks_cluster.main.arn
}

output "cluster_endpoint" {
  description = "Endpoint of the EKS cluster"
  value       = aws_eks_cluster.main.endpoint
}

output "cluster_version" {
  description = "Kubernetes version of the EKS cluster"
  value       = aws_eks_cluster.main.version
}

output "cluster_security_group_id" {
  description = "Security group ID of the EKS cluster"
  value       = aws_eks_cluster.main.vpc_config[0].cluster_security_group_id
}

output "cluster_certificate_authority_data" {
  description = "Certificate authority data for the EKS cluster"
  value       = aws_eks_cluster.main.certificate_authority[0].data
  sensitive   = true
}

output "cluster_oidc_issuer_url" {
  description = "OIDC issuer URL of the EKS cluster"
  value       = aws_eks_cluster.main.identity[0].oidc[0].issuer
}

output "oidc_provider_arn" {
  description = "ARN of the OIDC provider for EKS"
  value       = var.enable_irsa ? aws_iam_openid_connect_provider.eks[0].arn : null
}

output "node_group_id" {
  description = "ID of the EKS node group"
  value       = aws_eks_node_group.main.id
}

output "node_group_arn" {
  description = "ARN of the EKS node group"
  value       = aws_eks_node_group.main.arn
}

output "node_group_status" {
  description = "Status of the EKS node group"
  value       = aws_eks_node_group.main.status
}

output "aws_load_balancer_controller_role_arn" {
  description = "ARN of the AWS Load Balancer Controller IAM role"
  value       = var.enable_aws_load_balancer_controller ? aws_iam_role.aws_load_balancer_controller[0].arn : null
}

output "cloudwatch_log_group_name" {
  description = "Name of the CloudWatch log group for EKS"
  value       = aws_cloudwatch_log_group.eks_cluster.name
}
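These outputs are enough to configure Kubernetes-facing providers without touching a kubeconfig. A minimal sketch, assuming the module is instantiated as `module "eks"` in a deployment root, using an exec-based token so no long-lived credentials are stored in state:

```hcl
# Sketch: point the Terraform Kubernetes provider at the new cluster using
# the module outputs; "aws eks get-token" issues short-lived credentials.
provider "kubernetes" {
  host                   = module.eks.cluster_endpoint
  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)

  exec {
    api_version = "client.authentication.k8s.io/v1beta1"
    command     = "aws"
    args        = ["eks", "get-token", "--cluster-name", module.eks.cluster_id]
  }
}
```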
166
terraform/modules/eks/variables.tf
Normal file

@@ -0,0 +1,166 @@
# EKS Module Variables

variable "cluster_name" {
  description = "Name of the EKS cluster"
  type        = string
}

variable "cluster_role_arn" {
  description = "ARN of the IAM role for the EKS cluster"
  type        = string
}

variable "node_role_arn" {
  description = "ARN of the IAM role for EKS nodes"
  type        = string
}

variable "private_subnet_ids" {
  description = "List of private subnet IDs for EKS nodes"
  type        = list(string)
}

variable "public_subnet_ids" {
  description = "List of public subnet IDs for EKS control plane"
  type        = list(string)
}

variable "cluster_security_group_id" {
  description = "Security group ID for EKS cluster"
  type        = string
}

variable "kms_key_arn" {
  description = "ARN of KMS key for EKS encryption"
  type        = string
}

variable "kubernetes_version" {
  description = "Kubernetes version for EKS cluster"
  type        = string
  default     = "1.28"
}

variable "endpoint_public_access" {
  description = "Enable public access to EKS cluster endpoint"
  type        = bool
  default     = false
}

variable "public_access_cidrs" {
  description = "CIDR blocks allowed to access EKS cluster endpoint"
  type        = list(string)
  default     = ["0.0.0.0/0"]
}

variable "cluster_log_types" {
  description = "List of cluster log types to enable"
  type        = list(string)
  default     = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
}

variable "log_retention_days" {
  description = "Number of days to retain CloudWatch logs"
  type        = number
  default     = 30
}

variable "vpc_cni_version" {
  description = "Version of VPC CNI addon"
  type        = string
  default     = "v1.14.0-eksbuild.3"
}

variable "kube_proxy_version" {
  description = "Version of kube-proxy addon"
  type        = string
  default     = "v1.28.1-eksbuild.1"
}

variable "coredns_version" {
  description = "Version of CoreDNS addon"
  type        = string
  default     = "v1.10.1-eksbuild.2"
}

variable "ebs_csi_driver_version" {
  description = "Version of EBS CSI driver addon"
  type        = string
  default     = "v1.24.0-eksbuild.1"
}

variable "enable_ebs_csi_driver" {
  description = "Enable EBS CSI driver addon"
  type        = bool
  default     = true
}

variable "enable_irsa" {
  description = "Enable IAM Roles for Service Accounts (IRSA)"
  type        = bool
  default     = true
}

variable "enable_aws_load_balancer_controller" {
  description = "Enable AWS Load Balancer Controller IAM role"
  type        = bool
  default     = true
}

variable "node_instance_types" {
  description = "List of instance types for EKS nodes"
  type        = list(string)
  default     = ["t3.medium"]
}

variable "capacity_type" {
  description = "Capacity type for EKS nodes (ON_DEMAND or SPOT)"
  type        = string
  default     = "ON_DEMAND"
}

variable "node_disk_size" {
  description = "Disk size for EKS nodes in GB"
  type        = number
  default     = 50
}

variable "min_nodes" {
  description = "Minimum number of EKS nodes"
  type        = number
  default     = 1
}

variable "max_nodes" {
  description = "Maximum number of EKS nodes"
  type        = number
  default     = 3
}

variable "desired_nodes" {
  description = "Desired number of EKS nodes"
  type        = number
  default     = 2
}

variable "node_labels" {
  description = "Labels to apply to EKS nodes"
  type        = map(string)
  default     = {}
}

variable "node_taints" {
  description = "Taints to apply to EKS nodes"
  type = list(object({
    key    = string
    value  = string
    effect = string
  }))
  default = []
}

variable "tags" {
  description = "Tags to apply to resources"
  type        = map(string)
  default     = {}
}
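Taken together, the variables above are the module's API. A minimal sketch of wiring it from a deployment root, assuming the `vpc` and `security` modules elsewhere in this commit are instantiated alongside it with these module addresses:

```hcl
# Sketch: instantiate the eks module from deployments/eks, fed by the vpc
# and security modules' outputs. Values beyond the required inputs are
# illustrative.
module "eks" {
  source = "../../modules/eks"

  cluster_name              = "code-server"
  cluster_role_arn          = module.security.eks_cluster_iam_role_arn
  node_role_arn             = module.security.eks_nodes_iam_role_arn
  private_subnet_ids        = module.vpc.private_subnet_ids
  public_subnet_ids         = module.vpc.public_subnet_ids
  cluster_security_group_id = module.security.eks_cluster_security_group_id
  kms_key_arn               = module.security.kms_key_arn

  node_instance_types = ["t3.large"]
  desired_nodes       = 2

  tags = { Project = "code-server" }
}
```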
334
terraform/modules/security/main.tf
Normal file

@@ -0,0 +1,334 @@
# Security Module for Code-Server
# Creates security groups, IAM roles, and KMS keys

# Security Group for ALB
resource "aws_security_group" "alb" {
  name_prefix = "${var.name_prefix}-alb-"
  description = "Security group for Application Load Balancer"
  vpc_id      = var.vpc_id

  ingress {
    description = "HTTPS from allowed CIDR blocks"
    from_port   = 443
    to_port     = 443
    protocol    = "tcp"
    cidr_blocks = var.allowed_cidr_blocks
  }

  ingress {
    description = "HTTP from allowed CIDR blocks (redirected to HTTPS)"
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = var.allowed_cidr_blocks
  }

  egress {
    description = "Allow all outbound"
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = merge(
    var.tags,
    {
      Name = "${var.name_prefix}-alb-sg"
    }
  )

  lifecycle {
    create_before_destroy = true
  }
}

# Security Group for EC2 Code-Server instances
resource "aws_security_group" "code_server_ec2" {
  name_prefix = "${var.name_prefix}-code-server-ec2-"
  description = "Security group for Code-Server EC2 instances"
  vpc_id      = var.vpc_id

  ingress {
    description     = "Code-Server port from ALB"
    from_port       = 8080
    to_port         = 8080
    protocol        = "tcp"
    security_groups = [aws_security_group.alb.id]
  }

  ingress {
    description     = "OAuth2 Proxy from ALB"
    from_port       = 4180
    to_port         = 4180
    protocol        = "tcp"
    security_groups = [aws_security_group.alb.id]
  }

  # SSH rule is only created when CIDRs are supplied; the EC2 API rejects an
  # ingress rule with no source, and the variable defaults to an empty list.
  dynamic "ingress" {
    for_each = length(var.ssh_allowed_cidr_blocks) > 0 ? [1] : []
    content {
      description = "SSH from bastion (if needed)"
      from_port   = 22
      to_port     = 22
      protocol    = "tcp"
      cidr_blocks = var.ssh_allowed_cidr_blocks
    }
  }

  egress {
    description = "Allow all outbound"
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = merge(
    var.tags,
    {
      Name = "${var.name_prefix}-code-server-ec2-sg"
    }
  )

  lifecycle {
    create_before_destroy = true
  }
}

# Security Group for EKS Cluster
resource "aws_security_group" "eks_cluster" {
  name_prefix = "${var.name_prefix}-eks-cluster-"
  description = "Security group for EKS cluster control plane"
  vpc_id      = var.vpc_id

  egress {
    description = "Allow all outbound"
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = merge(
    var.tags,
    {
      Name = "${var.name_prefix}-eks-cluster-sg"
    }
  )

  lifecycle {
    create_before_destroy = true
  }
}

# Security Group for EKS Nodes
resource "aws_security_group" "eks_nodes" {
  name_prefix = "${var.name_prefix}-eks-nodes-"
  description = "Security group for EKS worker nodes"
  vpc_id      = var.vpc_id

  ingress {
    description = "Allow nodes to communicate with each other"
    from_port   = 0
    to_port     = 65535
    protocol    = "tcp"
    self        = true
  }

  ingress {
    description     = "Allow pods to communicate with the cluster API Server"
    from_port       = 443
    to_port         = 443
    protocol        = "tcp"
    security_groups = [aws_security_group.eks_cluster.id]
  }

  ingress {
    description     = "Allow ALB to reach pods"
    from_port       = 0
    to_port         = 65535
    protocol        = "tcp"
    security_groups = [aws_security_group.alb.id]
  }

  egress {
    description = "Allow all outbound"
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = merge(
    var.tags,
    {
      Name = "${var.name_prefix}-eks-nodes-sg"
    }
  )

  lifecycle {
    create_before_destroy = true
  }
}

# Allow EKS control plane to communicate with nodes
resource "aws_security_group_rule" "cluster_to_nodes" {
  description              = "Allow control plane to communicate with worker nodes"
  from_port                = 1025
  to_port                  = 65535
  protocol                 = "tcp"
  security_group_id        = aws_security_group.eks_nodes.id
  source_security_group_id = aws_security_group.eks_cluster.id
  type                     = "ingress"
}

# IAM Role for EC2 Code-Server instances
resource "aws_iam_role" "code_server_ec2" {
  name = "${var.name_prefix}-code-server-ec2-role"

  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Action = "sts:AssumeRole"
        Effect = "Allow"
        Principal = {
          Service = "ec2.amazonaws.com"
        }
      }
    ]
  })

  tags = var.tags
}

# IAM Instance Profile for EC2
resource "aws_iam_instance_profile" "code_server_ec2" {
  name = "${var.name_prefix}-code-server-ec2-profile"
  role = aws_iam_role.code_server_ec2.name

  tags = var.tags
}

# Attach SSM policy for Systems Manager access
resource "aws_iam_role_policy_attachment" "code_server_ec2_ssm" {
  role       = aws_iam_role.code_server_ec2.name
  policy_arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
}

# Attach CloudWatch policy for logging
resource "aws_iam_role_policy_attachment" "code_server_ec2_cloudwatch" {
  role       = aws_iam_role.code_server_ec2.name
  policy_arn = "arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy"
}

# Custom policy for ECR access (to pull container images)
resource "aws_iam_role_policy" "code_server_ec2_ecr" {
  name = "${var.name_prefix}-code-server-ec2-ecr-policy"
  role = aws_iam_role.code_server_ec2.id

  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Effect = "Allow"
        Action = [
          "ecr:GetAuthorizationToken",
          "ecr:BatchCheckLayerAvailability",
          "ecr:GetDownloadUrlForLayer",
          "ecr:BatchGetImage"
        ]
        Resource = "*"
      }
    ]
  })
}

# IAM Role for EKS Cluster
resource "aws_iam_role" "eks_cluster" {
  name = "${var.name_prefix}-eks-cluster-role"

  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Action = "sts:AssumeRole"
        Effect = "Allow"
        Principal = {
          Service = "eks.amazonaws.com"
        }
      }
    ]
  })

  tags = var.tags
}

# Attach required policies for EKS cluster
resource "aws_iam_role_policy_attachment" "eks_cluster_policy" {
  role       = aws_iam_role.eks_cluster.name
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
}

resource "aws_iam_role_policy_attachment" "eks_vpc_resource_controller" {
  role       = aws_iam_role.eks_cluster.name
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSVPCResourceController"
}

# IAM Role for EKS Node Group
resource "aws_iam_role" "eks_nodes" {
  name = "${var.name_prefix}-eks-nodes-role"

  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Action = "sts:AssumeRole"
        Effect = "Allow"
        Principal = {
          Service = "ec2.amazonaws.com"
        }
      }
    ]
  })

  tags = var.tags
}

# Attach required policies for EKS nodes
resource "aws_iam_role_policy_attachment" "eks_worker_node_policy" {
  role       = aws_iam_role.eks_nodes.name
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
}

resource "aws_iam_role_policy_attachment" "eks_cni_policy" {
  role       = aws_iam_role.eks_nodes.name
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
}

resource "aws_iam_role_policy_attachment" "eks_container_registry_policy" {
  role       = aws_iam_role.eks_nodes.name
  policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
}

resource "aws_iam_role_policy_attachment" "eks_ssm_policy" {
  role       = aws_iam_role.eks_nodes.name
  policy_arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
}

# KMS Key for encryption at rest
resource "aws_kms_key" "code_server" {
  description             = "KMS key for Code-Server encryption"
  deletion_window_in_days = 10
  enable_key_rotation     = true

  tags = merge(
    var.tags,
    {
      Name = "${var.name_prefix}-kms-key"
    }
  )
}

resource "aws_kms_alias" "code_server" {
  name          = "alias/${var.name_prefix}-code-server"
  target_key_id = aws_kms_key.code_server.key_id
}
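The KMS key created here feeds the EKS `encryption_config` shown earlier, and the same key can encrypt EBS volumes in the EC2 path. A minimal sketch under assumptions: the `module.security` address, the launch-template resource, and the AMI lookup are all illustrative, not the ec2 module from this commit.

```hcl
# Sketch: encrypt an EC2 root volume with the module's CMK. The AMI data
# source and resource names are hypothetical.
data "aws_ami" "al2023" {
  most_recent = true
  owners      = ["amazon"]

  filter {
    name   = "name"
    values = ["al2023-ami-*-x86_64"]
  }
}

resource "aws_launch_template" "code_server" {
  name_prefix = "code-server-"
  image_id    = data.aws_ami.al2023.id

  block_device_mappings {
    device_name = "/dev/xvda"
    ebs {
      volume_size = 50
      encrypted   = true
      kms_key_id  = module.security.kms_key_arn # assumed module address
    }
  }
}
```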
51
terraform/modules/security/outputs.tf
Normal file

@@ -0,0 +1,51 @@
# Security Module Outputs

output "alb_security_group_id" {
  description = "ID of the ALB security group"
  value       = aws_security_group.alb.id
}

output "code_server_ec2_security_group_id" {
  description = "ID of the Code-Server EC2 security group"
  value       = aws_security_group.code_server_ec2.id
}

output "eks_cluster_security_group_id" {
  description = "ID of the EKS cluster security group"
  value       = aws_security_group.eks_cluster.id
}

output "eks_nodes_security_group_id" {
  description = "ID of the EKS nodes security group"
  value       = aws_security_group.eks_nodes.id
}

output "code_server_ec2_iam_role_arn" {
  description = "ARN of the Code-Server EC2 IAM role"
  value       = aws_iam_role.code_server_ec2.arn
}

output "code_server_ec2_instance_profile_name" {
  description = "Name of the Code-Server EC2 instance profile"
  value       = aws_iam_instance_profile.code_server_ec2.name
}

output "eks_cluster_iam_role_arn" {
  description = "ARN of the EKS cluster IAM role"
  value       = aws_iam_role.eks_cluster.arn
}

output "eks_nodes_iam_role_arn" {
  description = "ARN of the EKS nodes IAM role"
  value       = aws_iam_role.eks_nodes.arn
}

output "kms_key_id" {
  description = "ID of the KMS key"
  value       = aws_kms_key.code_server.key_id
}

output "kms_key_arn" {
  description = "ARN of the KMS key"
  value       = aws_kms_key.code_server.arn
}
29
terraform/modules/security/variables.tf
Normal file

@@ -0,0 +1,29 @@
# Security Module Variables

variable "name_prefix" {
  description = "Prefix for resource names"
  type        = string
}

variable "vpc_id" {
  description = "ID of the VPC"
  type        = string
}

variable "allowed_cidr_blocks" {
  description = "CIDR blocks allowed to access the ALB"
  type        = list(string)
  default     = ["0.0.0.0/0"]
}

variable "ssh_allowed_cidr_blocks" {
  description = "CIDR blocks allowed to SSH into instances"
  type        = list(string)
  default     = []
}

variable "tags" {
  description = "Tags to apply to resources"
  type        = map(string)
  default     = {}
}
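A minimal sketch of instantiating this module, assuming the relative module path and a `module "vpc"` alongside it. Note the default `allowed_cidr_blocks` opens the ALB to the internet; production deployments should narrow it (the CIDR below is the documentation range, purely illustrative).

```hcl
# Sketch: security module wired to the vpc module from this commit.
module "security" {
  source = "../../modules/security"

  name_prefix = "code-server"
  vpc_id      = module.vpc.vpc_id

  # Narrow ALB access; leave SSH closed and rely on SSM Session Manager.
  allowed_cidr_blocks     = ["203.0.113.0/24"]
  ssh_allowed_cidr_blocks = []

  tags = { Project = "code-server" }
}
```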
239
terraform/modules/vpc/main.tf
Normal file

@@ -0,0 +1,239 @@
# VPC Module for Code-Server Deployment
# Creates a secure VPC with public and private subnets, NAT gateway, and VPC endpoints

locals {
  azs = slice(data.aws_availability_zones.available.names, 0, 3)
}

data "aws_availability_zones" "available" {
  state = "available"
}

# VPC
resource "aws_vpc" "main" {
  cidr_block           = var.vpc_cidr
  enable_dns_hostnames = true
  enable_dns_support   = true

  tags = merge(
    var.tags,
    {
      Name = "${var.name_prefix}-vpc"
    }
  )
}

# Internet Gateway
resource "aws_internet_gateway" "main" {
  vpc_id = aws_vpc.main.id

  tags = merge(
    var.tags,
    {
      Name = "${var.name_prefix}-igw"
    }
  )
}

# Public Subnets
resource "aws_subnet" "public" {
  count             = length(var.public_subnet_cidrs)
  vpc_id            = aws_vpc.main.id
  cidr_block        = var.public_subnet_cidrs[count.index]
  availability_zone = local.azs[count.index]

  map_public_ip_on_launch = true

  tags = merge(
    var.tags,
    {
      Name                                        = "${var.name_prefix}-public-subnet-${count.index + 1}"
      "kubernetes.io/role/elb"                    = "1"
      "kubernetes.io/cluster/${var.cluster_name}" = "shared"
    }
  )
}

# Private Subnets
resource "aws_subnet" "private" {
  count             = length(var.private_subnet_cidrs)
  vpc_id            = aws_vpc.main.id
  cidr_block        = var.private_subnet_cidrs[count.index]
  availability_zone = local.azs[count.index]

  tags = merge(
    var.tags,
    {
      Name                                        = "${var.name_prefix}-private-subnet-${count.index + 1}"
      "kubernetes.io/role/internal-elb"           = "1"
      "kubernetes.io/cluster/${var.cluster_name}" = "shared"
    }
  )
}

# Elastic IPs for NAT Gateways
resource "aws_eip" "nat" {
  count  = var.enable_nat_gateway ? (var.single_nat_gateway ? 1 : length(var.public_subnet_cidrs)) : 0
  domain = "vpc"

  tags = merge(
    var.tags,
    {
      Name = "${var.name_prefix}-nat-eip-${count.index + 1}"
    }
  )

  depends_on = [aws_internet_gateway.main]
}

# NAT Gateways (one per AZ, or one total when single_nat_gateway is set)
resource "aws_nat_gateway" "main" {
  count         = var.enable_nat_gateway ? (var.single_nat_gateway ? 1 : length(var.public_subnet_cidrs)) : 0
  allocation_id = aws_eip.nat[count.index].id
  subnet_id     = aws_subnet.public[count.index].id

  tags = merge(
    var.tags,
    {
      Name = "${var.name_prefix}-nat-${count.index + 1}"
    }
  )

  depends_on = [aws_internet_gateway.main]
}

# Public Route Table
resource "aws_route_table" "public" {
  vpc_id = aws_vpc.main.id

  tags = merge(
    var.tags,
    {
      Name = "${var.name_prefix}-public-rt"
    }
  )
}

# Public Route
resource "aws_route" "public" {
  route_table_id         = aws_route_table.public.id
  destination_cidr_block = "0.0.0.0/0"
  gateway_id             = aws_internet_gateway.main.id
}

# Public Route Table Association
resource "aws_route_table_association" "public" {
  count          = length(var.public_subnet_cidrs)
  subnet_id      = aws_subnet.public[count.index].id
  route_table_id = aws_route_table.public.id
}

# Private Route Tables
# Created regardless of NAT so the subnet associations below always have a
# table to attach to; the default route is added separately when NAT is on.
resource "aws_route_table" "private" {
  count  = var.single_nat_gateway ? 1 : length(var.private_subnet_cidrs)
  vpc_id = aws_vpc.main.id

  tags = merge(
    var.tags,
    {
      Name = "${var.name_prefix}-private-rt-${count.index + 1}"
    }
  )
}

# Private Routes (default route through NAT, only when NAT is enabled)
resource "aws_route" "private" {
  count                  = var.enable_nat_gateway ? (var.single_nat_gateway ? 1 : length(var.private_subnet_cidrs)) : 0
  route_table_id         = aws_route_table.private[count.index].id
  destination_cidr_block = "0.0.0.0/0"
  nat_gateway_id         = aws_nat_gateway.main[var.single_nat_gateway ? 0 : count.index].id
}

# Private Route Table Associations
resource "aws_route_table_association" "private" {
  count          = length(var.private_subnet_cidrs)
  subnet_id      = aws_subnet.private[count.index].id
  route_table_id = aws_route_table.private[var.single_nat_gateway ? 0 : count.index].id
}

# VPC Endpoints for enhanced security (S3 gateway endpoint here; interface
# endpoints for services such as ECR can be added the same way)
resource "aws_vpc_endpoint" "s3" {
  count        = var.enable_vpc_endpoints ? 1 : 0
  vpc_id       = aws_vpc.main.id
  service_name = "com.amazonaws.${var.aws_region}.s3"

  tags = merge(
    var.tags,
    {
      Name = "${var.name_prefix}-s3-endpoint"
    }
  )
}

resource "aws_vpc_endpoint_route_table_association" "s3_private" {
  count           = var.enable_vpc_endpoints ? length(aws_route_table.private) : 0
  route_table_id  = aws_route_table.private[count.index].id
  vpc_endpoint_id = aws_vpc_endpoint.s3[0].id
}

# VPC Flow Logs for security monitoring
resource "aws_cloudwatch_log_group" "vpc_flow_logs" {
  count             = var.enable_flow_logs ? 1 : 0
  name              = "/aws/vpc/${var.name_prefix}-flow-logs"
  retention_in_days = var.flow_logs_retention_days

  tags = var.tags
}

resource "aws_iam_role" "vpc_flow_logs" {
  count = var.enable_flow_logs ? 1 : 0
  name  = "${var.name_prefix}-vpc-flow-logs-role"

  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Action = "sts:AssumeRole"
        Effect = "Allow"
        Principal = {
          Service = "vpc-flow-logs.amazonaws.com"
        }
      }
    ]
  })

  tags = var.tags
}

resource "aws_iam_role_policy" "vpc_flow_logs" {
  count = var.enable_flow_logs ? 1 : 0
  name  = "${var.name_prefix}-vpc-flow-logs-policy"
  role  = aws_iam_role.vpc_flow_logs[0].id

  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Action = [
          "logs:CreateLogGroup",
          "logs:CreateLogStream",
          "logs:PutLogEvents",
          "logs:DescribeLogGroups",
          "logs:DescribeLogStreams"
        ]
        Effect   = "Allow"
        Resource = "*"
      }
    ]
  })
}

resource "aws_flow_log" "main" {
  count           = var.enable_flow_logs ? 1 : 0
  iam_role_arn    = aws_iam_role.vpc_flow_logs[0].arn
  log_destination = aws_cloudwatch_log_group.vpc_flow_logs[0].arn
  traffic_type    = "ALL"
  vpc_id          = aws_vpc.main.id

  tags = var.tags
}
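The S3 gateway endpoint keeps S3 traffic off the NAT path, but nodes in private subnets still pull container images through NAT unless ECR interface endpoints exist. A hedged sketch of what adding them might look like; the endpoint security group is an assumption and is not part of this module:

```hcl
# Sketch: ECR interface endpoints so private nodes pull images without NAT.
# aws_security_group.endpoints is hypothetical; it must allow 443 from the VPC.
resource "aws_vpc_endpoint" "ecr_api" {
  vpc_id              = aws_vpc.main.id
  service_name        = "com.amazonaws.${var.aws_region}.ecr.api"
  vpc_endpoint_type   = "Interface"
  subnet_ids          = aws_subnet.private[*].id
  private_dns_enabled = true
  security_group_ids  = [aws_security_group.endpoints.id] # hypothetical SG
}

resource "aws_vpc_endpoint" "ecr_dkr" {
  vpc_id              = aws_vpc.main.id
  service_name        = "com.amazonaws.${var.aws_region}.ecr.dkr"
  vpc_endpoint_type   = "Interface"
  subnet_ids          = aws_subnet.private[*].id
  private_dns_enabled = true
  security_group_ids  = [aws_security_group.endpoints.id] # hypothetical SG
}
```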
41
terraform/modules/vpc/outputs.tf
Normal file

@@ -0,0 +1,41 @@
# VPC Module Outputs

output "vpc_id" {
  description = "ID of the VPC"
  value       = aws_vpc.main.id
}

output "vpc_cidr" {
  description = "CIDR block of the VPC"
  value       = aws_vpc.main.cidr_block
}

output "public_subnet_ids" {
  description = "IDs of public subnets"
  value       = aws_subnet.public[*].id
}

output "private_subnet_ids" {
  description = "IDs of private subnets"
  value       = aws_subnet.private[*].id
}

output "nat_gateway_ids" {
  description = "IDs of NAT gateways"
  value       = aws_nat_gateway.main[*].id
}

output "internet_gateway_id" {
  description = "ID of the internet gateway"
  value       = aws_internet_gateway.main.id
}

output "public_route_table_id" {
  description = "ID of the public route table"
  value       = aws_route_table.public.id
}

output "private_route_table_ids" {
  description = "IDs of private route tables"
  value       = aws_route_table.private[*].id
}
70
terraform/modules/vpc/variables.tf
Normal file

@@ -0,0 +1,70 @@
# VPC Module Variables

variable "name_prefix" {
  description = "Prefix for resource names"
  type        = string
}

variable "vpc_cidr" {
  description = "CIDR block for VPC"
  type        = string
  default     = "10.0.0.0/16"
}

variable "public_subnet_cidrs" {
  description = "CIDR blocks for public subnets"
  type        = list(string)
  default     = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
}

variable "private_subnet_cidrs" {
  description = "CIDR blocks for private subnets"
  type        = list(string)
  default     = ["10.0.11.0/24", "10.0.12.0/24", "10.0.13.0/24"]
}

variable "aws_region" {
  description = "AWS region"
  type        = string
}

variable "cluster_name" {
  description = "Name of the EKS cluster (for subnet tagging)"
  type        = string
}

variable "enable_nat_gateway" {
  description = "Enable NAT gateway for private subnets"
  type        = bool
  default     = true
}

variable "single_nat_gateway" {
  description = "Use a single NAT gateway for all private subnets (cost optimization)"
  type        = bool
  default     = false
}

variable "enable_vpc_endpoints" {
  description = "Enable VPC endpoints for AWS services"
  type        = bool
  default     = true
}

variable "enable_flow_logs" {
  description = "Enable VPC flow logs"
  type        = bool
  default     = true
}

variable "flow_logs_retention_days" {
  description = "Number of days to retain VPC flow logs"
  type        = number
  default     = 30
}

variable "tags" {
  description = "Tags to apply to resources"
  type        = map(string)
  default     = {}
}
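A minimal sketch of calling the module with the defaults, assuming the relative module path used by the other sketches. Setting `single_nat_gateway = true` trades per-AZ NAT redundancy for roughly one third of the NAT cost, which is often the right call for dev environments:

```hcl
# Sketch: vpc module instantiation from a deployment root.
module "vpc" {
  source = "../../modules/vpc"

  name_prefix  = "code-server"
  aws_region   = "us-east-1"
  cluster_name = "code-server" # used only for EKS subnet tags

  single_nat_gateway = true # one NAT for all private subnets

  tags = { Project = "code-server" }
}
```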
129
terraform/scripts/deploy-ec2.sh
Executable file

@@ -0,0 +1,129 @@
#!/bin/bash
# Deployment script for Code-Server on EC2
# This script automates the deployment process

set -e

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
DEPLOYMENT_DIR="${SCRIPT_DIR}/../deployments/ec2"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

echo_info() {
    echo -e "${GREEN}[INFO]${NC} $1"
}

echo_warn() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}

echo_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}

# Check prerequisites
check_prerequisites() {
    echo_info "Checking prerequisites..."

    # Check Terraform
    if ! command -v terraform &> /dev/null; then
        echo_error "Terraform is not installed. Please install Terraform first."
        exit 1
    fi

    # Check AWS CLI
    if ! command -v aws &> /dev/null; then
        echo_error "AWS CLI is not installed. Please install AWS CLI first."
        exit 1
    fi

    # Check AWS credentials
    if ! aws sts get-caller-identity &> /dev/null; then
        echo_error "AWS credentials are not configured. Please configure AWS credentials first."
        exit 1
    fi

    echo_info "All prerequisites met!"
}

# Initialize Terraform
init_terraform() {
    echo_info "Initializing Terraform..."
    cd "${DEPLOYMENT_DIR}"
    terraform init
}

# Validate Terraform configuration
validate_terraform() {
    echo_info "Validating Terraform configuration..."
    cd "${DEPLOYMENT_DIR}"
    terraform validate
}

# Plan Terraform deployment
plan_terraform() {
    echo_info "Planning Terraform deployment..."
    cd "${DEPLOYMENT_DIR}"
    terraform plan -out=tfplan
}

# Apply Terraform deployment
apply_terraform() {
    echo_info "Applying Terraform deployment..."
    cd "${DEPLOYMENT_DIR}"

    read -p "Do you want to apply this plan? (yes/no): " response
    if [ "$response" != "yes" ]; then
        echo_warn "Deployment cancelled."
        exit 0
    fi

    terraform apply tfplan
    rm -f tfplan
}

# Get outputs
get_outputs() {
    echo_info "Getting deployment outputs..."
    cd "${DEPLOYMENT_DIR}"

    echo ""
    echo_info "=== Deployment Complete ==="
    echo ""

    ALB_URL=$(terraform output -raw alb_url 2>/dev/null || echo "N/A")
    SECRET_ARN=$(terraform output -raw code_server_password_secret_arn 2>/dev/null || echo "N/A")
    REGION=$(terraform output -raw aws_region 2>/dev/null || echo "us-east-1")

    echo "Code-Server URL: $ALB_URL"
    echo ""
    echo "To get the code-server password, run:"
    echo "  aws secretsmanager get-secret-value \\"
    echo "    --secret-id $SECRET_ARN \\"
    echo "    --region $REGION \\"
    echo "    --query SecretString \\"
    echo "    --output text"
    echo ""
}

# Main deployment flow
main() {
    echo_info "Starting Code-Server EC2 deployment..."
    echo ""

    check_prerequisites
    init_terraform
    validate_terraform
    plan_terraform
    apply_terraform
    get_outputs

    echo_info "Deployment completed successfully!"
}

# Run main function
main "$@"
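The script's `terraform init` uses local state by default. For anything beyond a single operator, a remote backend with locking is the usual upgrade; a minimal sketch is below — the bucket and table names are placeholders that must already exist before `terraform init` runs.

```hcl
# Sketch: remote state with locking for the ec2 deployment. Placeholder
# bucket/table names; create them (with versioning on the bucket) first.
terraform {
  backend "s3" {
    bucket         = "my-terraform-state-bucket"
    key            = "code-server/ec2/terraform.tfstate"
    region         = "us-east-1"
    dynamodb_table = "terraform-locks"
    encrypt        = true
  }
}
```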
204
terraform/scripts/deploy-eks.sh
Executable file

@@ -0,0 +1,204 @@
#!/bin/bash
# Deployment script for Code-Server on EKS
# This script automates the deployment process

set -e

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
DEPLOYMENT_DIR="${SCRIPT_DIR}/../deployments/eks"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

echo_info() {
    echo -e "${GREEN}[INFO]${NC} $1"
}

echo_warn() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}

echo_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}

# Check prerequisites
check_prerequisites() {
    echo_info "Checking prerequisites..."

    # Check Terraform
    if ! command -v terraform &> /dev/null; then
        echo_error "Terraform is not installed. Please install Terraform first."
        exit 1
    fi

    # Check AWS CLI
    if ! command -v aws &> /dev/null; then
        echo_error "AWS CLI is not installed. Please install AWS CLI first."
        exit 1
    fi

    # Check kubectl
    if ! command -v kubectl &> /dev/null; then
        echo_error "kubectl is not installed. Please install kubectl first."
        exit 1
    fi

    # Check Helm
    if ! command -v helm &> /dev/null; then
        echo_error "Helm is not installed. Please install Helm first."
        exit 1
    fi

    # Check AWS credentials
    if ! aws sts get-caller-identity &> /dev/null; then
        echo_error "AWS credentials are not configured. Please configure AWS credentials first."
        exit 1
    fi

    echo_info "All prerequisites met!"
}

# Initialize Terraform
init_terraform() {
    echo_info "Initializing Terraform..."
    cd "${DEPLOYMENT_DIR}"
    terraform init
}

# Validate Terraform configuration
validate_terraform() {
    echo_info "Validating Terraform configuration..."
    cd "${DEPLOYMENT_DIR}"
    terraform validate
}

# Plan Terraform deployment
plan_terraform() {
    echo_info "Planning Terraform deployment..."
    cd "${DEPLOYMENT_DIR}"
    terraform plan -out=tfplan
}

# Apply Terraform deployment
apply_terraform() {
    echo_info "Applying Terraform deployment..."
    cd "${DEPLOYMENT_DIR}"

    read -p "Do you want to apply this plan? (yes/no): " response
    if [ "$response" != "yes" ]; then
        echo_warn "Deployment cancelled."
        exit 0
    fi

    terraform apply tfplan
    rm -f tfplan
}

# Configure kubectl
configure_kubectl() {
    echo_info "Configuring kubectl..."
    cd "${DEPLOYMENT_DIR}"

    CLUSTER_NAME=$(terraform output -raw eks_cluster_id)
    REGION=$(terraform output -raw aws_region 2>/dev/null || echo "us-east-1")

    aws eks update-kubeconfig --region "$REGION" --name "$CLUSTER_NAME"

    echo_info "Waiting for nodes to be ready..."
    kubectl wait --for=condition=Ready nodes --all --timeout=300s
}

# Deploy Code-Server
deploy_code_server() {
    echo_info "Deploying Code-Server..."

    read -p "Do you want to deploy Code-Server now? (yes/no): " response
    if [ "$response" != "yes" ]; then
        echo_warn "Code-Server deployment skipped. You can deploy it manually later."
        return
    fi

    # The Helm chart lives at the repository root under ci/helm-chart,
    # two levels up from terraform/scripts.
    cd "${SCRIPT_DIR}/../../ci/helm-chart"

    helm upgrade --install code-server . \
        --namespace code-server \
        --create-namespace \
        --values "${DEPLOYMENT_DIR}/k8s/code-server-values.yaml" \
        --wait \
        --timeout 10m

    echo_info "Code-Server deployed successfully!"
}

# Deploy OAuth2 Proxy (optional)
deploy_oauth2_proxy() {
    echo_info "OAuth2 Proxy deployment..."

    read -p "Do you want to deploy OAuth2 Proxy for SAML authentication? (yes/no): " response
    if [ "$response" != "yes" ]; then
        echo_warn "OAuth2 Proxy deployment skipped."
        return
    fi

    cd "${DEPLOYMENT_DIR}"
    kubectl apply -f k8s/oauth2-proxy.yaml

    echo_info "OAuth2 Proxy deployed successfully!"
}

# Get outputs
get_outputs() {
    echo_info "Getting deployment information..."
    cd "${DEPLOYMENT_DIR}"

    echo ""
    echo_info "=== Deployment Complete ==="
    echo ""

    CLUSTER_NAME=$(terraform output -raw eks_cluster_id)
    REGION=$(terraform output -raw aws_region 2>/dev/null || echo "us-east-1")

    echo "EKS Cluster: $CLUSTER_NAME"
    echo "Region: $REGION"
    echo ""

    echo "To get the Load Balancer URL, run:"
    echo "  kubectl get ingress -n code-server"
    echo ""

    echo "To access Code-Server:"
    echo "  1. Wait for the ingress to get an ADDRESS (ALB DNS name)"
    echo "  2. Access the URL shown in the ADDRESS field"
    echo ""

    echo "Useful commands:"
    echo "  kubectl get pods -n code-server"
    echo "  kubectl logs -n code-server -l app.kubernetes.io/name=code-server"
    echo "  kubectl port-forward -n code-server svc/code-server 8080:8080"
    echo ""
}

# Main deployment flow
main() {
    echo_info "Starting Code-Server EKS deployment..."
    echo ""

    check_prerequisites
    init_terraform
    validate_terraform
    plan_terraform
    apply_terraform
    configure_kubectl
    deploy_code_server
    deploy_oauth2_proxy
    get_outputs

    echo_info "Deployment completed successfully!"
}

# Run main function
main "$@"
45
terraform/scripts/destroy-ec2.sh
Executable file

@@ -0,0 +1,45 @@
#!/bin/bash
# Destroy script for Code-Server EC2 deployment

set -e

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
DEPLOYMENT_DIR="${SCRIPT_DIR}/../deployments/ec2"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

echo_info() {
    echo -e "${GREEN}[INFO]${NC} $1"
}

echo_warn() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}

echo_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}

main() {
    echo_warn "WARNING: This will destroy all Code-Server EC2 infrastructure!"
    echo_warn "This action cannot be undone!"
    echo ""

    read -p "Are you sure you want to continue? (type 'yes' to confirm): " response
    if [ "$response" != "yes" ]; then
        echo_info "Destruction cancelled."
        exit 0
    fi

    echo_info "Destroying Code-Server EC2 infrastructure..."
    cd "${DEPLOYMENT_DIR}"
    terraform destroy

    echo_info "Destruction completed!"
}

main "$@"
62
terraform/scripts/destroy-eks.sh
Executable file

@@ -0,0 +1,62 @@
#!/bin/bash
# Destroy script for Code-Server EKS deployment

set -e

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
DEPLOYMENT_DIR="${SCRIPT_DIR}/../deployments/eks"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

echo_info() {
    echo -e "${GREEN}[INFO]${NC} $1"
}

echo_warn() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}

echo_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}

cleanup_k8s_resources() {
    echo_info "Cleaning up Kubernetes resources..."

    # Delete Code-Server Helm release
    helm uninstall code-server -n code-server 2>/dev/null || true

    # Delete OAuth2 Proxy
    kubectl delete -f "${DEPLOYMENT_DIR}/k8s/oauth2-proxy.yaml" 2>/dev/null || true

    # Delete namespace
    kubectl delete namespace code-server 2>/dev/null || true

    echo_info "Kubernetes resources cleaned up!"
}

main() {
    echo_warn "WARNING: This will destroy all Code-Server EKS infrastructure!"
    echo_warn "This action cannot be undone!"
    echo ""

    read -p "Are you sure you want to continue? (type 'yes' to confirm): " response
    if [ "$response" != "yes" ]; then
        echo_info "Destruction cancelled."
        exit 0
    fi

    cleanup_k8s_resources

    echo_info "Destroying Code-Server EKS infrastructure..."
    cd "${DEPLOYMENT_DIR}"
    terraform destroy

    echo_info "Destruction completed!"
}

main "$@"