diff --git a/.github/workflows/apply-napv5.yml b/.github/workflows/apply-napv5.yml new file mode 100644 index 00000000..6ce373dc --- /dev/null +++ b/.github/workflows/apply-napv5.yml @@ -0,0 +1,376 @@ +name: "NGINX NAP/NIC-V5 Deployment" +on: + push: + branches: [apply-nap] +env: + AWS_REGION: us-east-1 +jobs: + bootstrap_infra: + name: "Bootstrap S3/DynamoDB" + runs-on: ubuntu-latest + defaults: + run: + working-directory: ./s3 + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v3 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-session-token: ${{ secrets.AWS_SESSION_TOKEN }} + aws-region: ${{ env.AWS_REGION }} + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v3 + + - name: Initialize Terraform (S3 Backend) + run: | + terraform init + + - name: Terraform Plan + run: terraform plan -no-color -input=false -out=tfplan + + - name: Check for Changes + id: check_changes + run: | + if grep -q "No changes." 
<(terraform show -no-color tfplan); then + echo "has_changes=false" >> $GITHUB_OUTPUT + else + echo "has_changes=true" >> $GITHUB_OUTPUT + fi + + - name: Terraform Apply + if: github.event_name == 'push' && github.ref == 'refs/heads/apply-nap' && steps.check_changes.outputs.has_changes == 'true' + run: terraform apply -auto-approve tfplan + + + + terraform_infra: + name: "AWS Infra" + runs-on: ubuntu-latest + needs: bootstrap_infra + permissions: + contents: read + defaults: + run: + working-directory: ./infra + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v3 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-session-token: ${{ secrets.AWS_SESSION_TOKEN }} + aws-region: ${{ env.AWS_REGION }} + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v3 + + + - name: Initialize Terraform (S3 Backend) + run: | + terraform init + + - name: Terraform Plan + if: github.event_name == 'pull_request' || github.event_name == 'push' + run: | + terraform plan -no-color -input=false -out=tfplan + terraform show -no-color tfplan > plan.txt + + - name: Check Changes + id: check_changes + run: | + if grep -q "No changes." 
plan.txt; then + echo "has_changes=false" >> $GITHUB_OUTPUT + else + echo "has_changes=true" >> $GITHUB_OUTPUT + fi + + - name: Terraform Apply + if: github.event_name == 'push' && github.ref == 'refs/heads/apply-nap' && steps.check_changes.outputs.has_changes == 'true' + run: terraform apply -auto-approve tfplan + + + terraform_eks: + name: "AWS EKS" + runs-on: ubuntu-latest + needs: terraform_infra + defaults: + run: + working-directory: ./eks-cluster + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v3 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-session-token: ${{ secrets.AWS_SESSION_TOKEN }} + aws-region: ${{ env.AWS_REGION }} + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v3 + + + - name: Terraform Init + run: terraform init + + - name: Terraform Plan + if: github.event_name == 'pull_request' || github.event_name == 'push' + run: | + terraform plan -no-color -input=false -out=tfplan + terraform show -no-color tfplan > plan.txt + + - name: Check Changes + id: check_changes + run: | + if grep -q "No changes." 
plan.txt; then + echo "has_changes=false" >> $GITHUB_OUTPUT + else + echo "has_changes=true" >> $GITHUB_OUTPUT + fi + + - name: Terraform Apply + if: github.event_name == 'push' && github.ref == 'refs/heads/apply-nap' && steps.check_changes.outputs.has_changes == 'true' + run: terraform apply -auto-approve tfplan + + terraform_nap: + name: "NGINX App Protect" + runs-on: ubuntu-latest + needs: terraform_eks + defaults: + run: + working-directory: ./nap + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v3 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-session-token: ${{ secrets.AWS_SESSION_TOKEN }} + aws-region: ${{ env.AWS_REGION }} + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v3 + + - name: Terraform Init + run: terraform init + + - name: Terraform Plan + if: github.event_name == 'pull_request' || github.event_name == 'push' + run: | + terraform plan -no-color -input=false -lock=false -out=tfplan \ + -var="workspace_path=${{ env.WORKSPACE_PATH }}" \ + -var="nginx_jwt=${{ secrets.NGINX_JWT }}" \ + -var="nginx_pwd=none" + terraform show -no-color tfplan > plan.txt + env: + WORKSPACE_PATH: "./nap" + + + - name: Check Changes + id: check_changes + run: | + if grep -q "No changes." 
plan.txt; then + echo "has_changes=false" >> $GITHUB_OUTPUT + else + echo "has_changes=true" >> $GITHUB_OUTPUT + fi + + - name: Terraform Apply + if: github.event_name == 'push' && github.ref == 'refs/heads/apply-nap' && steps.check_changes.outputs.has_changes == 'true' + run: terraform apply -auto-approve -lock=false tfplan + + terraform_policy: + name: "NGINX Policy" + runs-on: ubuntu-latest + needs: terraform_nap + defaults: + run: + working-directory: ./policy + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v3 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-session-token: ${{ secrets.AWS_SESSION_TOKEN }} + aws-region: ${{ env.AWS_REGION }} + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v3 + + + - name: Terraform Init (EKS) + run: terraform init + working-directory: ./eks-cluster + + - name: Print EKS Terraform Outputs + run: terraform output + working-directory: ./eks-cluster + + - name: Fetch EKS Cluster Name and Region + run: | + echo "EKS_CLUSTER_NAME=$(terraform output -raw cluster_name)" >> $GITHUB_ENV + echo "AWS_REGION=$AWS_REGION" >> $GITHUB_ENV + working-directory: ./eks-cluster + + - name: Configure kubectl for EKS + run: | + aws eks update-kubeconfig --name $EKS_CLUSTER_NAME --region $AWS_REGION + + - name: Verify kubectl connectivity + run: kubectl get nodes -n nginx-ingress + + - name: Install Docker and Docker Compose + run: | + sudo apt-get update -y + sudo apt-get remove -y containerd containerd.io + sudo apt-get install apt-transport-https ca-certificates curl software-properties-common -y + curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - + sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" + sudo apt-get update -y + sudo apt-get install docker-ce docker-ce-cli 
containerd.io -y + sudo service docker start + sudo usermod -aG docker $USER + + sudo curl -L "https://github.com/docker/compose/releases/latest/download/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose + sudo chmod +x /usr/local/bin/docker-compose + + - name: Create Certificates Directory for Docker + run: | + sudo mkdir -p /etc/docker/certs.d/private-registry.nginx.com + + - name: Create NGINX Repository Certificate for Docker + run: echo "${{ secrets.NGINX_REPO_CRT }}" | sudo tee /etc/docker/certs.d/private-registry.nginx.com/client.cert > /dev/null + + - name: Create NGINX Repository Key for Docker + run: echo "${{ secrets.NGINX_REPO_KEY }}" | sudo tee /etc/docker/certs.d/private-registry.nginx.com/client.key > /dev/null + + - name: Create Certificates Directory for NGINX + run: | + sudo mkdir -p /etc/ssl/nginx + + - name: Save NGINX Repository Certificate for NGINX + run: echo "${{ secrets.NGINX_REPO_CRT }}" | sudo tee /etc/ssl/nginx/nginx-repo.crt > /dev/null + + - name: Save NGINX Repository Key for NGINX + run: echo "${{ secrets.NGINX_REPO_KEY }}" | sudo tee /etc/ssl/nginx/nginx-repo.key > /dev/null + + - name: Build Docker Image + run: | + docker build --no-cache \ + --secret id=nginx-crt,src=/etc/ssl/nginx/nginx-repo.crt \ + --secret id=nginx-key,src=/etc/ssl/nginx/nginx-repo.key \ + -t waf-compiler-5.4.0:custom . 
+ + - name: Ensure correct permissions for nap/charts directory + run: | + sudo chown -R $USER:$USER ${{ github.workspace }} + sudo chmod -R 777 ${{ github.workspace }} + + - name: Run Docker Container as Root + run: | + docker run --rm \ + -v ${{ github.workspace }}:/workspace \ + waf-compiler-5.4.0:custom \ + -p /workspace/policy/policy.json -o /workspace/policy/compiled_policy.tgz + + - name: Fix permissions for compiled files + run: | + sudo chown -R $USER:$USER ${{ github.workspace }}/policy + chmod 644 ${{ github.workspace }}/policy/compiled_policy.tgz + ls -lh ${{ github.workspace }}/policy + + + - name: Copy Compiled Policy to NGINX Ingress Controller + run: | + NGINX_POD=$(kubectl get pods -n nginx-ingress -l app.kubernetes.io/name=nginx-ingress -o jsonpath='{.items[0].metadata.name}') + if [ -z "$NGINX_POD" ]; then + echo "Error: NGINX Ingress Controller pod not found!" + exit 1 + fi + kubectl cp ${{ github.workspace }}/policy/compiled_policy.tgz $NGINX_POD:/etc/app_protect/bundles/compiled_policy.tgz -n nginx-ingress + + - name: Terraform Init + run: terraform init + + - name: Terraform Plan + run: | + terraform plan -no-color -input=false -out=tfplan + terraform show -no-color tfplan > plan.txt + + - name: Check Changes + id: check_changes + run: | + if grep -q "No changes." 
plan.txt; then + echo "has_changes=false" >> $GITHUB_OUTPUT + else + echo "has_changes=true" >> $GITHUB_OUTPUT + fi + + - name: Terraform Apply + if: github.event_name == 'push' && github.ref == 'refs/heads/apply-nap' && steps.check_changes.outputs.has_changes == 'true' + run: terraform apply -auto-approve tfplan + + terraform_arcadia: + name: "Arcadia WebApp" + runs-on: ubuntu-latest + needs: terraform_policy + defaults: + run: + working-directory: ./arcadia + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v3 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-session-token: ${{ secrets.AWS_SESSION_TOKEN }} + aws-region: ${{ env.AWS_REGION }} + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v3 + + - name: Terraform Init + run: terraform init + + - name: Terraform Validate + run: terraform validate -no-color + + - name: Terraform Plan + if: github.event_name == 'pull_request' || github.event_name == 'push' + run: | + terraform plan -no-color -input=false -out=tfplan + terraform show -no-color tfplan > plan.txt + + - name: Check Changes + id: check_changes + run: | + if grep -q "No changes." 
plan.txt; then + echo "has_changes=false" >> $GITHUB_OUTPUT + else + echo "has_changes=true" >> $GITHUB_OUTPUT + fi + + - name: Terraform Apply + if: github.event_name == 'push' && github.ref == 'refs/heads/apply-nap' && steps.check_changes.outputs.has_changes == 'true' + run: terraform apply -auto-approve tfplan diff --git a/.github/workflows/destroy-nic-nap.yml b/.github/workflows/destroy-nic-nap.yml new file mode 100644 index 00000000..4fecb09e --- /dev/null +++ b/.github/workflows/destroy-nic-nap.yml @@ -0,0 +1,365 @@ +name: "NGINX V5-NIC/NAP Destroy" +on: + push: + branches: + - destroy-nic-napv5 + pull_request: +env: + AWS_REGION: us-east-1 +jobs: + terraform_arcadia: + name: "Destroy Arcadia WebApp" + runs-on: ubuntu-latest + defaults: + run: + working-directory: ./arcadia + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v3 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-session-token: ${{ secrets.AWS_SESSION_TOKEN }} + aws-region: ${{ env.AWS_REGION }} + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v3 + + - name: Terraform Init + run: terraform init + + - name: Terraform Validate + run: terraform validate -no-color + + - name: Terraform Plan (Destroy) + if: github.event_name == 'pull_request' || github.event_name == 'push' + run: | + terraform plan -destroy -no-color -input=false -lock=false -out=tfplan + terraform show -no-color tfplan > plan.txt + + - name: Check Changes + id: check_changes + run: | + if grep -q "No changes." 
plan.txt; then + echo "has_changes=false" >> $GITHUB_OUTPUT + else + echo "has_changes=true" >> $GITHUB_OUTPUT + fi + + - name: Terraform Destroy + if: github.event_name == 'push' && github.ref == 'refs/heads/destroy-nic-napv5' && steps.check_changes.outputs.has_changes == 'true' + run: terraform destroy -auto-approve -lock=false -input=false + + terraform_policy: + name: "Destroy NGINX Policy" + runs-on: ubuntu-latest + needs: terraform_arcadia + defaults: + run: + working-directory: ./policy + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v3 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-session-token: ${{ secrets.AWS_SESSION_TOKEN }} + aws-region: ${{ env.AWS_REGION }} + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v3 + + - name: Terraform Init + run: terraform init + + - name: Terraform Destroy + run: terraform destroy -auto-approve -lock=false + + terraform_nap: + name: "Destroy NGINX NIC/App Protect" + runs-on: ubuntu-latest + needs: terraform_policy + defaults: + run: + working-directory: ./nap + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v3 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-session-token: ${{ secrets.AWS_SESSION_TOKEN }} + aws-region: ${{ env.AWS_REGION }} + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v3 + + - name: Terraform Init + run: terraform init + + - name: Terraform Plan (Destroy) + run: | + terraform plan -destroy -no-color -input=false -lock=false -out=tfplan \ + -var="workspace_path=${{ env.WORKSPACE_PATH }}" \ + -var="nginx_jwt=${{ secrets.NGINX_JWT }}" \ + -var="nginx_pwd=none" + env: + WORKSPACE_PATH: "./nap" + + - name: Check Changes + id: check_changes 
+ run: | + if grep -q "No changes." plan.txt; then + echo "has_changes=false" >> $GITHUB_OUTPUT + else + echo "has_changes=true" >> $GITHUB_OUTPUT + fi + + - name: Terraform Destroy + if: github.event_name == 'push' && github.ref == 'refs/heads/destroy-nic-napv5' && steps.check_changes.outputs.has_changes == 'true' + run: | + terraform destroy -auto-approve -input=false -lock=false \ + -var="workspace_path=${{ env.WORKSPACE_PATH }}" \ + -var="nginx_jwt=${{ secrets.NGINX_JWT }}" \ + -var="nginx_pwd=none" + env: + WORKSPACE_PATH: "./nap" + + terraform_eks: + name: "Destroy AWS EKS" + runs-on: ubuntu-latest + needs: terraform_nap + defaults: + run: + working-directory: ./eks-cluster + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v3 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-session-token: ${{ secrets.AWS_SESSION_TOKEN }} + aws-region: ${{ env.AWS_REGION }} + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v3 + + - name: Terraform Init + run: terraform init + + - name: Terraform Plan (Destroy) + if: github.event_name == 'pull_request' || github.event_name == 'push' + run: | + terraform plan -destroy -no-color -input=false -out=tfplan -lock=false + terraform show -no-color tfplan > plan.txt + + - name: Check Changes + id: check_changes + run: | + if grep -q "No changes." 
plan.txt; then + echo "has_changes=false" >> $GITHUB_OUTPUT + else + echo "has_changes=true" >> $GITHUB_OUTPUT + fi + + - name: Terraform Destroy + if: github.event_name == 'push' && github.ref == 'refs/heads/destroy-nic-napv5' && steps.check_changes.outputs.has_changes == 'true' + run: terraform destroy -auto-approve -input=false -lock=false + + terraform_infra: + name: "Destroy AWS Infra" + runs-on: ubuntu-latest + needs: terraform_eks + defaults: + run: + working-directory: ./infra + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v3 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-session-token: ${{ secrets.AWS_SESSION_TOKEN }} + aws-region: ${{ env.AWS_REGION }} + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v3 + + - name: Terraform Init + run: terraform init + + - name: Terraform Plan (Destroy) + if: github.event_name == 'pull_request' || github.event_name == 'push' + run: | + terraform plan -destroy -no-color -input=false -out=tfplan -lock=false + terraform show -no-color tfplan > plan.txt + + - name: Check Changes + id: check_changes + run: | + if grep -q "No changes." 
plan.txt; then + echo "has_changes=false" >> $GITHUB_OUTPUT + else + echo "has_changes=true" >> $GITHUB_OUTPUT + fi + + - name: Terraform Destroy + if: github.event_name == 'push' && github.ref == 'refs/heads/destroy-nic-napv5' && steps.check_changes.outputs.has_changes == 'true' + run: terraform destroy -auto-approve -input=false -lock=false + + + terraform_S3: + name: "Delete S3/DynamoDB" + needs: terraform_infra + runs-on: ubuntu-latest + defaults: + run: + working-directory: ./s3 + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Install jq + run: sudo apt-get install -y jq + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v3 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-session-token: ${{ secrets.AWS_SESSION_TOKEN }} + aws-region: ${{ env.AWS_REGION }} + + - name: Set Bucket Name + id: set_bucket + run: | + echo "bucket_name=akash-terraform-state-bucket" >> $GITHUB_OUTPUT + + - name: Nuclear S3 Bucket Deletion + run: | + set -e + BUCKET_NAME="${{ steps.set_bucket.outputs.bucket_name }}" + + # 1. Delete all object versions (with null checks) + echo "🔥 Deleting ALL object versions..." + versions=$(aws s3api list-object-versions --bucket $BUCKET_NAME --output json || echo '{"Versions":[],"DeleteMarkers":[]}') + versions_to_delete=$(echo $versions | jq '{Objects: [.Versions[]? | {Key:.Key, VersionId:.VersionId}]}' || echo '{"Objects":[]}') + if [ "$(echo $versions_to_delete | jq '.Objects | length')" -gt 0 ]; then + aws s3api delete-objects --bucket $BUCKET_NAME --delete "$versions_to_delete" || true + fi + + # 2. Delete all delete markers (with null checks) + echo "🗑️ Deleting ALL delete markers..." + markers_to_delete=$(echo $versions | jq '{Objects: [.DeleteMarkers[]? 
| {Key:.Key, VersionId:.VersionId}]}' || echo '{"Objects":[]}') + if [ "$(echo $markers_to_delete | jq '.Objects | length')" -gt 0 ]; then + aws s3api delete-objects --bucket $BUCKET_NAME --delete "$markers_to_delete" || true + fi + + # 3. Force delete any remaining objects + echo "💥 Force deleting any remaining objects..." + aws s3 rm s3://$BUCKET_NAME --recursive --include "*" || true + + # 4. Delete bucket + echo "🚀 Deleting bucket..." + aws s3api delete-bucket --bucket $BUCKET_NAME || true + + # 5. Final verification + if aws s3api head-bucket --bucket $BUCKET_NAME 2>/dev/null; then + echo "::error::Bucket $BUCKET_NAME still exists after deletion attempts!" + exit 1 + else + echo "✅ Bucket $BUCKET_NAME successfully deleted" + fi + + - name: Delete DynamoDB Table + run: | + set -e + TABLE_NAME="terraform-lock-table" + echo "💥 Deleting DynamoDB table..." + if aws dynamodb describe-table --table-name $TABLE_NAME 2>/dev/null; then + aws dynamodb delete-table --table-name $TABLE_NAME || true + echo "⌛ Waiting for table to be deleted..." + aws dynamodb wait table-not-exists --table-name $TABLE_NAME || true + fi + if aws dynamodb describe-table --table-name $TABLE_NAME 2>/dev/null; then + echo "::error::Table $TABLE_NAME still exists!" + exit 1 + else + echo "✅ Table $TABLE_NAME successfully deleted" + fi + + - name: Clean Up IAM Resources + run: | + set -e + # Delete policy + POLICY_ARN=$(aws iam list-policies --query "Policies[?PolicyName=='TerraformStateAccess'].Arn" --output text || echo "") + if [ -n "$POLICY_ARN" ]; then + echo "🔗 Detaching policy from roles..." + ATTACHED_ROLES=$(aws iam list-entities-for-policy --policy-arn $POLICY_ARN --query "PolicyRoles[].RoleName" --output text || echo "") + for ROLE in $ATTACHED_ROLES; do + aws iam detach-role-policy --role-name $ROLE --policy-arn $POLICY_ARN || true + done + + echo "🗑️ Deleting policy..." 
+ aws iam delete-policy --policy-arn $POLICY_ARN || true + fi + + # Delete role + ROLE_NAME="TerraformCIExecutionRole" + if aws iam get-role --role-name $ROLE_NAME 2>/dev/null; then + echo "🗑️ Deleting role..." + aws iam delete-role --role-name $ROLE_NAME || true + fi + + - name: Verify Deletion + run: | + echo "✅ Verification:" + + # Verify S3 bucket + BUCKET_NAME="${{ steps.set_bucket.outputs.bucket_name }}" + if aws s3api head-bucket --bucket "$BUCKET_NAME" 2>/dev/null; then + echo "::error::Bucket $BUCKET_NAME still exists!" + exit 1 + else + echo "Bucket $BUCKET_NAME deleted successfully" + fi + + # Verify DynamoDB table + TABLE_NAME="terraform-lock-table" + if aws dynamodb describe-table --table-name "$TABLE_NAME" 2>/dev/null; then + echo "::error::Table $TABLE_NAME still exists!" + exit 1 + else + echo "Table $TABLE_NAME deleted successfully" + fi + + # Verify IAM resources + if aws iam get-policy --policy-arn "arn:aws:iam::$(aws sts get-caller-identity --query Account --output text):policy/TerraformStateAccess" 2>/dev/null; then + echo "::error::IAM Policy still exists!" + exit 1 + else + echo "IAM Policy deleted successfully" + fi + + if aws iam get-role --role-name "TerraformCIExecutionRole" 2>/dev/null; then + echo "::error::IAM Role still exists!" + exit 1 + else + echo "IAM Role deleted successfully" + fi diff --git a/README.md b/README.md index 052d9357..39239896 100644 --- a/README.md +++ b/README.md @@ -80,18 +80,27 @@ This workflow requires the following secrets to be configured in your GitHub rep * resource_owner = "Your-name" * aws_region = "AWS Region" ex. 
us-east-1 * azs = ["us-east-1a", "us-east1b"] - Change to Correct Availability Zones based on selected Region - * Also update assets boolean value as per your work-flows -**STEP 3:** In the `S3 directory`, inside the `variable.tf` file modify the following data - * description = "S3 bucket for Terraform remote state storage" + +**STEP 3:** Modify the `S3/variable.tf` file inside the `S3 directory`. * default = "your-unique-bucket-name" # Replace with your actual bucket name - -**STEP 4:** Commit and push your build branch to your forked repo +**STEP 4:** Modify the `Backend.tf` file in the `Infra/Backend.tf`, `eks-cluster/Backend.tf`, `Nap/Backend.tf`, `Policy/Backend.tf`, `Arcadia/Backend.tf` directory. + * bucket = "your-unique-bucket-name" # Your S3 bucket name + * key = "infra/terraform.tfstate" # Path to state file + * region = "your-aws-region-name" By default us-east-1 + +**STEP 5:** Add the name of your S3 bucket inside the `NGINX V5-NIC/NAP Destroy` workflow file, which is located in the Terraform _S3 job. + * name: Set Bucket Name + * id: set_bucket + * run: | + * echo "bucket_name="your-unique-bucket-name" >> $GITHUB_OUTPUT + +**STEP 6:** Commit and push your build branch to your forked repo * Build will run and can be monitored in the GitHub Actions tab and TF Cloud console -**STEP 5:** Once the pipeline is complete, verify that your assets were deployed or destroyed based on your workflow. +**STEP 7:** Once the pipeline is complete, verify that your assets were deployed or destroyed based on your workflow. **NOTE:** The autocert process takes time. It may be 5 to 10 minutes before Let's Encrypt has provided the cert. 
diff --git a/arcadia/backend.tf b/arcadia/backend.tf new file mode 100644 index 00000000..8c2b3dab --- /dev/null +++ b/arcadia/backend.tf @@ -0,0 +1,9 @@ +terraform { + backend "s3" { + bucket = "akash-terraform-state-bucket" # Your S3 bucket name + key = "arcadia/terraform.tfstate" # Path to state file + region = "us-east-1" # AWS region + dynamodb_table = "terraform-lock-table" # DynamoDB table for state locking + encrypt = true # Encrypt state file at rest + } +} \ No newline at end of file diff --git a/arcadia/data.tf b/arcadia/data.tf index dbe26856..cc8f10b4 100644 --- a/arcadia/data.tf +++ b/arcadia/data.tf @@ -1,21 +1,33 @@ -data "tfe_outputs" "infra" { - organization = var.tf_cloud_organization - workspace = "infra" +# Read infra state from S3 +data "terraform_remote_state" "infra" { + backend = "s3" + config = { + bucket = "akash-terraform-state-bucket" # Your S3 bucket name + key = "infra/terraform.tfstate" # Path to infra's state file + region = "us-east-1" # AWS region + } } -data "tfe_outputs" "eks" { - organization = var.tf_cloud_organization - workspace = "eks" + + +data "terraform_remote_state" "nap" { + backend = "s3" + config = { + bucket = "akash-terraform-state-bucket" # Your S3 bucket name + key = "nap/terraform.tfstate" # Path to NAP state file + region = "us-east-1" # AWS region + } } -data "tfe_outputs" "nap" { - count = data.tfe_outputs.infra.values.nap ? 1 : 0 - organization = var.tf_cloud_organization - workspace = "nap" -} -data "tfe_outputs" "nic" { - count = data.tfe_outputs.infra.values.nic ? 
1 : 0 - organization = var.tf_cloud_organization - workspace = "nic" + +data "terraform_remote_state" "eks" { + backend = "s3" + config = { + bucket = "akash-terraform-state-bucket" # Your S3 bucket name + key = "eks-cluster/terraform.tfstate" # Path to EKS state file + region = "us-east-1" # AWS region + } } + +# Get EKS cluster auth using S3 state data "aws_eks_cluster_auth" "auth" { - name = data.tfe_outputs.eks.values.cluster_name -} + name = data.terraform_remote_state.eks.outputs.cluster_name +} \ No newline at end of file diff --git a/arcadia/locals.tf b/arcadia/locals.tf index e38109f0..b92cfbdb 100644 --- a/arcadia/locals.tf +++ b/arcadia/locals.tf @@ -1,9 +1,8 @@ locals { - project_prefix = data.tfe_outputs.infra.values.project_prefix - #external_name = try(data.tfe_outputs.nap.values.external_name, data.tfe_outputs.nic.values.external_name, "arcadia-cd-demo.sr.f5-cloud-demo.com") - external_name = try(data.tfe_outputs.nap[0].values.external_name, data.tfe_outputs.nic[0].values.external_name) - aws_region = data.tfe_outputs.infra.values.aws_region - host = data.tfe_outputs.eks.values.cluster_endpoint - cluster_ca_certificate = data.tfe_outputs.eks.values.kubeconfig-certificate-authority-data - cluster_name = data.tfe_outputs.eks.values.cluster_name -} \ No newline at end of file + project_prefix = data.terraform_remote_state.infra.outputs.project_prefix + aws_region = data.terraform_remote_state.infra.outputs.aws_region + external_name = try(data.terraform_remote_state.nap.outputs.external_name, "arcadia-cd-demo.sr.f5-cloud-demo.com") + host = data.terraform_remote_state.eks.outputs.cluster_endpoint + cluster_ca_certificate = data.terraform_remote_state.eks.outputs.kubeconfig-certificate-authority-data + cluster_name = data.terraform_remote_state.eks.outputs.cluster_name +} \ No newline at end of file diff --git a/arcadia/versions.tf b/arcadia/versions.tf index 49810ae2..fc823ab0 100644 --- a/arcadia/versions.tf +++ b/arcadia/versions.tf @@ -1,14 +1,23 @@ 
terraform { - required_version = ">= 0.14.0" + required_version = ">= 1.6.0" + required_providers { - aws = ">= 4" + aws = { + source = "hashicorp/aws" + version = ">= 4.0.0" + } kubernetes = { - source = "hashicorp/kubernetes" - version = "2.16.1" + source = "hashicorp/kubernetes" + version = ">= 2.23.0" } helm = { source = "hashicorp/helm" - version = ">=2.7.0" + version = ">= 2.12.0" + } + kubectl = { + source = "gavinbunney/kubectl" + version = ">= 1.15.0" } } -} \ No newline at end of file + +} diff --git a/arcadia/virtual.tf b/arcadia/virtual.tf new file mode 100644 index 00000000..14822512 --- /dev/null +++ b/arcadia/virtual.tf @@ -0,0 +1,70 @@ +resource "kubernetes_manifest" "arcadia_virtualserver" { + manifest = { + apiVersion = "k8s.nginx.org/v1" + kind = "VirtualServer" + metadata = { + name = "arcadia-virtualserver" + namespace = "default" + } + spec = { + host = try(data.terraform_remote_state.nap.outputs.external_name, "arcadia-cd-demo.sr.f5-cloud-demo.com") + + # Reference the WAF policy + policies = [ + { + name = "waf-policy" # Name of the WAF policy + namespace = "default" # Namespace where the WAF policy is deployed + } + ] + + upstreams = [ + { + name = "main-upstream" + service = kubernetes_service.main.metadata[0].name + port = 80 + }, + { + name = "backend-upstream" + service = kubernetes_service.backend.metadata[0].name + port = 80 + }, + { + name = "app2-upstream" + service = kubernetes_service.app_2.metadata[0].name + port = 80 + }, + { + name = "app3-upstream" + service = kubernetes_service.app_3.metadata[0].name + port = 80 + } + ] + routes = [ + { + path = "/" + action = { + pass = "main-upstream" + } + }, + { + path = "/files" + action = { + pass = "backend-upstream" + } + }, + { + path = "/api" + action = { + pass = "app2-upstream" + } + }, + { + path = "/app3" + action = { + pass = "app3-upstream" + } + } + ] + } + } +} \ No newline at end of file diff --git a/arcadia/wafpolicy.tf b/arcadia/wafpolicy.tf new file mode 100644 index 
00000000..e68eefa3
--- /dev/null
+++ b/arcadia/wafpolicy.tf
@@ -0,0 +1,16 @@
+resource "kubernetes_manifest" "waf_policy" {
+  manifest = {
+    apiVersion = "k8s.nginx.org/v1"
+    kind = "Policy"
+    metadata = {
+      name = "waf-policy"
+      namespace = "default" # Replace with your desired namespace
+    }
+    spec = {
+      waf = {
+        enable = true
+        apBundle = "compiled_policy.tgz"
+      }
+    }
+  }
+}
diff --git a/eks-cluster/.terraform/modules/modules.json b/eks-cluster/.terraform/modules/modules.json
new file mode 100644
index 00000000..9fed0aeb
--- /dev/null
+++ b/eks-cluster/.terraform/modules/modules.json
@@ -0,0 +1 @@
+{"Modules":[{"Key":"","Source":"","Dir":"."},{"Key":"subnet_addrs","Source":"registry.terraform.io/hashicorp/subnets/cidr","Version":"1.0.0","Dir":".terraform/modules/subnet_addrs"}]}
\ No newline at end of file
diff --git a/eks-cluster/.terraform/modules/subnet_addrs b/eks-cluster/.terraform/modules/subnet_addrs
new file mode 160000
index 00000000..52ca061a
--- /dev/null
+++ b/eks-cluster/.terraform/modules/subnet_addrs
@@ -0,0 +1 @@
+Subproject commit 52ca061aaea2e8f58c91ac03ca1fae45e44c28bf
diff --git a/eks-cluster/backend.tf b/eks-cluster/backend.tf
new file mode 100644
index 00000000..aa26cccc
--- /dev/null
+++ b/eks-cluster/backend.tf
@@ -0,0 +1,10 @@
+terraform {
+  backend "s3" {
+    bucket = "akash-terraform-state-bucket" # Your S3 bucket name
+    key = "eks-cluster/terraform.tfstate" # Path to state file
+    region = "us-east-1" # AWS region
+    dynamodb_table = "terraform-lock-table" # DynamoDB table for state locking
+    encrypt = true
+
+  }
+}
\ No newline at end of file
diff --git a/eks-cluster/data.tf b/eks-cluster/data.tf
index 27125a9a..50c442bb 100644
--- a/eks-cluster/data.tf
+++ b/eks-cluster/data.tf
@@ -1,5 +1,9 @@
-data "tfe_outputs" "infra" {
-  organization = var.tf_cloud_organization
-  workspace = "infra"
+data "terraform_remote_state" "infra" {
+  backend = "s3"
+  config = {
+    bucket = "akash-terraform-state-bucket" # Your S3 bucket name
+    key = 
"infra/terraform.tfstate" # Path to infra's state file + region = "us-east-1" # AWS region + } } diff --git a/eks-cluster/eks_cluster.tf b/eks-cluster/eks_cluster.tf index bf00a428..9fa0f0df 100644 --- a/eks-cluster/eks_cluster.tf +++ b/eks-cluster/eks_cluster.tf @@ -1,4 +1,4 @@ - +# eks-cluster/ekscluster.tf # Create EKS cluster and node groups resource "aws_eks_cluster" "eks-tf" { @@ -28,8 +28,8 @@ resource "aws_eks_node_group" "private-node-group-1-tf" { instance_types = ["t3.medium"] scaling_config { - desired_size = 2 - max_size = 3 + desired_size = 1 + max_size = 2 min_size = 1 } @@ -44,13 +44,6 @@ resource "aws_eks_node_group" "private-node-group-1-tf" { ] } - resource "aws_eks_addon" "cluster-addons" { - for_each = { for addon in var.eks_addons : addon.name => addon } - cluster_name = aws_eks_cluster.eks-tf.id - addon_name = each.value.name - #addon_version = each.value.version - resolve_conflicts = "OVERWRITE" - } resource "aws_eks_node_group" "private-node-group-2-tf" { cluster_name = aws_eks_cluster.eks-tf.name @@ -61,8 +54,8 @@ resource "aws_eks_node_group" "private-node-group-2-tf" { instance_types = ["t3.medium"] scaling_config { - desired_size = 2 - max_size = 3 + desired_size = 1 + max_size = 2 min_size = 1 } @@ -76,3 +69,52 @@ resource "aws_eks_node_group" "private-node-group-2-tf" { aws_iam_role_policy_attachment.AmazonEC2ContainerRegistryReadOnly, ] } + +# Create EKS Addons +resource "aws_eks_addon" "cluster-addons" { + for_each = { for addon in var.eks_addons : addon.name => addon } + cluster_name = aws_eks_cluster.eks-tf.id + addon_name = each.value.name + resolve_conflicts = "OVERWRITE" + + # Add service account role ARN for EBS CSI driver + service_account_role_arn = each.value.name == "aws-ebs-csi-driver" ? 
aws_iam_role.ebs_csi_driver.arn : null + + depends_on = [ + aws_eks_node_group.private-node-group-1-tf, + aws_eks_node_group.private-node-group-2-tf, + aws_iam_role.ebs_csi_driver + ] +} + +output "kubeconfig" { + value = <<EOT +apiVersion: v1 +clusters: +- cluster: + server: ${aws_eks_cluster.eks-tf.endpoint} + certificate-authority-data: ${aws_eks_cluster.eks-tf.certificate_authority[0].data} + name: ${aws_eks_cluster.eks-tf.arn} # Use ARN as context name +contexts: +- context: + cluster: ${aws_eks_cluster.eks-tf.arn} + user: ${aws_eks_cluster.eks-tf.arn} + name: ${aws_eks_cluster.eks-tf.arn} # Dynamic context name +current-context: ${aws_eks_cluster.eks-tf.arn} +kind: Config +users: +- name: ${aws_eks_cluster.eks-tf.arn} + user: + exec: + apiVersion: client.authentication.k8s.io/v1beta1 + command: aws + args: + - "eks" + - "get-token" + - "--cluster-name" + - ${aws_eks_cluster.eks-tf.name} + - "--region" + - ${local.aws_region} +EOT + sensitive = true +} diff --git a/eks-cluster/iam.tf b/eks-cluster/iam.tf index fe82d707..2dbeeb40 100644 --- a/eks-cluster/iam.tf +++ b/eks-cluster/iam.tf @@ -1,196 +1,142 @@ - -# Create IAM role for the EKS cluster - resource "aws_iam_role" "eks-iam-role" { - name = format("%s-eks-iam-role-%s", local.project_prefix, local.build_suffix) + name = format("%s-eks-iam-role-%s", local.project_prefix, local.build_suffix) + path = "/" - path = "/" - - assume_role_policy = <<EOF + assume_role_policy = <<EOF { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Service": "eks.amazonaws.com" - }, - "Action": "sts:AssumeRole" - } - ] + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "eks.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] } EOF } + resource "aws_iam_role_policy_attachment" "AmazonEKSClusterPolicy" { - policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy" - role = aws_iam_role.eks-iam-role.name + policy_arn = 
"arn:aws:iam::aws:policy/AmazonEKSClusterPolicy" + role = aws_iam_role.eks-iam-role.name } + resource "aws_iam_role_policy_attachment" "AmazonEC2ContainerRegistryReadOnly-EKS" { - policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" - role = aws_iam_role.eks-iam-role.name + policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" + role = aws_iam_role.eks-iam-role.name } -# Create IAM role for the worker nodes +# OIDC Provider Configuration (Unchanged) +data "aws_eks_cluster" "cluster" { + name = aws_eks_cluster.eks-tf.name +} + +locals { + oidc_issuer_url = replace(data.aws_eks_cluster.cluster.identity[0].oidc[0].issuer, "https://", "") +} +data "tls_certificate" "eks_oidc" { + url = data.aws_eks_cluster.cluster.identity[0].oidc[0].issuer +} + +resource "aws_iam_openid_connect_provider" "oidc" { + url = data.aws_eks_cluster.cluster.identity[0].oidc[0].issuer + client_id_list = ["sts.amazonaws.com"] + thumbprint_list = [data.tls_certificate.eks_oidc.certificates[0].sha1_fingerprint] +} +locals { + oidc_provider_arn = aws_iam_openid_connect_provider.oidc.arn +} + +# Worker Node IAM Role (Fixed) resource "aws_iam_role" "workernodes" { name = format("%s-eks-node-iam-role-%s", local.project_prefix, local.build_suffix) - + assume_role_policy = jsonencode({ - Statement = [{ - Action = "sts:AssumeRole" - Effect = "Allow" - Principal = { - Service = "ec2.amazonaws.com" - } - }] - Version = "2012-10-17" + Version = "2012-10-17", + Statement = [ + { + Effect = "Allow", + Principal = { + Service = "ec2.amazonaws.com" + }, + Action = "sts:AssumeRole" + } + ] }) - } - - resource "aws_iam_role_policy_attachment" "AmazonEKSWorkerNodePolicy" { +} + +# Standard Node Policies (Unchanged) +resource "aws_iam_role_policy_attachment" "AmazonEKSWorkerNodePolicy" { policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy" - role = aws_iam_role.workernodes.name - } - - resource "aws_iam_role_policy_attachment" "AmazonEKS_CNI_Policy" { + role = 
aws_iam_role.workernodes.name +} + +resource "aws_iam_role_policy_attachment" "AmazonEKS_CNI_Policy" { policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy" - role = aws_iam_role.workernodes.name - } - - resource "aws_iam_role_policy_attachment" "EC2InstanceProfileForImageBuilderECRContainerBuilds" { + role = aws_iam_role.workernodes.name +} + +resource "aws_iam_role_policy_attachment" "EC2InstanceProfileForImageBuilderECRContainerBuilds" { policy_arn = "arn:aws:iam::aws:policy/EC2InstanceProfileForImageBuilderECRContainerBuilds" - role = aws_iam_role.workernodes.name - } - - resource "aws_iam_role_policy_attachment" "AmazonEC2ContainerRegistryReadOnly" { + role = aws_iam_role.workernodes.name +} + +resource "aws_iam_role_policy_attachment" "AmazonEC2ContainerRegistryReadOnly" { policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" - role = aws_iam_role.workernodes.name - } + role = aws_iam_role.workernodes.name +} - resource "aws_iam_policy" "workernodes_ebs_policy" { - name = format("%s-ebs_csi_driver-%s", local.project_prefix, local.build_suffix) +# Removed the custom EBS policy and its attachment - using managed policy instead - policy = jsonencode ( -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "ec2:DetachVolume", - "ec2:AttachVolume", - "ec2:ModifyVolume", - "ec2:DescribeInstances", - "ec2:DescribeAvailabilityZones", - "ec2:DescribeTags", - "ec2:DescribeVolumes", - "ec2:CreateSnapshot", - "ec2:DescribeVolumesModifications", - "ec2:DescribeSnapshots" - ], - "Resource": "*" - }, - { - "Effect": "Allow", - "Action": "ec2:CreateTags", - "Resource": [ - "arn:aws:ec2:*:*:volume/*", - "arn:aws:ec2:*:*:snapshot/*" - ], - "Condition": { - "StringEquals": { - "ec2:CreateAction": [ - "CreateVolume", - "CreateSnapshot" - ] - } - } - }, - { - "Effect": "Allow", - "Action": "ec2:DeleteTags", - "Resource": [ - "arn:aws:ec2:*:*:volume/*", - "arn:aws:ec2:*:*:snapshot/*" - ] - }, - { - "Effect": "Allow", - 
"Action": "ec2:CreateVolume", - "Resource": "*", - "Condition": { - "StringLike": { - "aws:RequestTag/ebs.csi.aws.com/cluster": "true" - } - } - }, - { - "Effect": "Allow", - "Action": "ec2:CreateVolume", - "Resource": "*", - "Condition": { - "StringLike": { - "aws:RequestTag/CSIVolumeName": "*" - } - } - }, - { - "Effect": "Allow", - "Action": "ec2:DeleteVolume", - "Resource": "*", - "Condition": { - "StringLike": { - "ec2:ResourceTag/ebs.csi.aws.com/cluster": "true" - } - } - }, - { - "Effect": "Allow", - "Action": "ec2:DeleteVolume", - "Resource": "*", - "Condition": { - "StringLike": { - "ec2:ResourceTag/CSIVolumeName": "*" - } - } - }, - { - "Effect": "Allow", - "Action": "ec2:DeleteVolume", - "Resource": "*", - "Condition": { - "StringLike": { - "ec2:ResourceTag/kubernetes.io/created-for/pvc/name": "*" - } - } - }, - { - "Effect": "Allow", - "Action": "ec2:DeleteSnapshot", - "Resource": "*", - "Condition": { - "StringLike": { - "ec2:ResourceTag/CSIVolumeSnapshotName": "*" - } - } +# IAM Instance Profile (Unchanged) +resource "aws_iam_instance_profile" "workernodes" { + name = format("%s-eks-node-instance-profile-%s", local.project_prefix, local.build_suffix) + role = aws_iam_role.workernodes.name +} + +resource "aws_iam_role" "ebs_csi_driver" { + name = format("%s-ebs-csi-driver-role-%s", local.project_prefix, local.build_suffix) + + assume_role_policy = jsonencode({ + Version = "2012-10-17", + Statement = [ + { + Effect = "Allow", + Principal = { + Federated = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:oidc-provider/${replace( + aws_eks_cluster.eks-tf.identity[0].oidc[0].issuer, + "https://", + "" + )}" }, - { - "Effect": "Allow", - "Action": "ec2:DeleteSnapshot", - "Resource": "*", - "Condition": { - "StringLike": { - "ec2:ResourceTag/ebs.csi.aws.com/cluster": "true" - } - } + Action = "sts:AssumeRoleWithWebIdentity", + Condition = { + StringEquals = { + "${replace( + aws_eks_cluster.eks-tf.identity[0].oidc[0].issuer, + "https://", + "" + 
)}:sub" = "system:serviceaccount:kube-system:ebs-csi-controller-sa", + "${replace( + aws_eks_cluster.eks-tf.identity[0].oidc[0].issuer, + "https://", + "" + )}:aud" = "sts.amazonaws.com" + } } + } ] -}) -#POLICY + }) } -resource "aws_iam_role_policy_attachment" "workernodes-AmazonEBSCSIDriver" { - policy_arn = aws_iam_policy.workernodes_ebs_policy.arn - role = aws_iam_role.workernodes.name -} \ No newline at end of file +data "aws_caller_identity" "current" {} + +# Use the official AWS managed policy instead of custom one +resource "aws_iam_role_policy_attachment" "ebs_csi_driver" { + policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy" + role = aws_iam_role.ebs_csi_driver.name +} diff --git a/eks-cluster/locals.tf b/eks-cluster/locals.tf index 0ae13f72..1fd0e52c 100644 --- a/eks-cluster/locals.tf +++ b/eks-cluster/locals.tf @@ -1,13 +1,13 @@ locals { - project_prefix = data.tfe_outputs.infra.values.project_prefix - resource_owner = data.tfe_outputs.infra.values.resource_owner - build_suffix = data.tfe_outputs.infra.values.build_suffix - aws_region = data.tfe_outputs.infra.values.aws_region - azs = data.tfe_outputs.infra.values.azs - vpc_id = data.tfe_outputs.infra.values.vpc_id - vpc_main_route_table_id = data.tfe_outputs.infra.values.vpc_main_route_table_id - public_subnet_ids = data.tfe_outputs.infra.values.public_subnet_ids - eks_cidr = data.tfe_outputs.infra.values.eks_cidr - internal_sg_id = data.tfe_outputs.infra.values.internal_sg_id - cluster_name = format("%s-eks-cluster-%s", local.project_prefix, local.build_suffix) + project_prefix = data.terraform_remote_state.infra.outputs.project_prefix + resource_owner = data.terraform_remote_state.infra.outputs.resource_owner + build_suffix = data.terraform_remote_state.infra.outputs.build_suffix + aws_region = data.terraform_remote_state.infra.outputs.aws_region + azs = data.terraform_remote_state.infra.outputs.azs + vpc_id = data.terraform_remote_state.infra.outputs.vpc_id + 
vpc_main_route_table_id = data.terraform_remote_state.infra.outputs.vpc_main_route_table_id + public_subnet_ids = data.terraform_remote_state.infra.outputs.public_subnet_ids + eks_cidr = data.terraform_remote_state.infra.outputs.eks_cidr[0] # Use the first CIDR block if eks_cidr is a list + internal_sg_id = data.terraform_remote_state.infra.outputs.internal_sg_id + cluster_name = format("%s-eks-cluster-%s", local.project_prefix, local.build_suffix) } \ No newline at end of file diff --git a/eks-cluster/network.tf b/eks-cluster/network.tf index d325c2ef..ca1aeed4 100644 --- a/eks-cluster/network.tf +++ b/eks-cluster/network.tf @@ -1,8 +1,6 @@ - - # Create Elastic IP resource "aws_eip" "main" { - vpc = true + vpc = true tags = { resource_owner = local.resource_owner Name = format("%s-eip-%s", local.project_prefix, local.build_suffix) @@ -20,46 +18,40 @@ resource "aws_nat_gateway" "main" { } } -module subnet_addrs { - for_each = nonsensitive(toset(local.azs)) - source = "hashicorp/subnets/cidr" - version = "1.0.0" - base_cidr_block = cidrsubnet(local.eks_cidr,2,index(local.azs,each.key)) - networks = [ - { - name = "eks-internal" - new_bits = 1 - }, - { - name = "eks-external" - new_bits = 1 - } - ] +# Calculate subnet CIDR blocks using cidrsubnet +locals { + eks_internal_cidrs = [for i, az in local.azs : cidrsubnet(local.eks_cidr, 2, i)] + eks_external_cidrs = [for i, az in local.azs : cidrsubnet(local.eks_cidr, 2, length(local.azs) + i)] } +# Create EKS internal subnets resource "aws_subnet" "eks-internal" { - for_each = nonsensitive(toset(local.azs)) + for_each = toset(local.azs) vpc_id = local.vpc_id - cidr_block = module.subnet_addrs[each.key].network_cidr_blocks["eks-internal"] + cidr_block = local.eks_internal_cidrs[index(local.azs, each.key)] availability_zone = each.key - tags = { - Name = format("%s-eks-int-subnet-%s",local.project_prefix,each.key) + tags = { + Name = format("%s-eks-int-subnet-%s", local.project_prefix, each.key) 
"kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" + "kubernetes.io/role/internal-elb" = "1" } } + +# Create EKS external subnets resource "aws_subnet" "eks-external" { - for_each = nonsensitive(toset(local.azs)) - vpc_id = local.vpc_id - cidr_block = module.subnet_addrs[each.key].network_cidr_blocks["eks-external"] + for_each = toset(local.azs) + vpc_id = local.vpc_id + cidr_block = local.eks_external_cidrs[index(local.azs, each.key)] map_public_ip_on_launch = true - availability_zone = each.key - tags = { - Name = format("%s-eks-ext-subnet-%s",local.project_prefix,each.key) + availability_zone = each.key + tags = { + Name = format("%s-eks-ext-subnet-%s", local.project_prefix, each.key) "kubernetes.io/cluster/${local.cluster_name}" = "shared" "kubernetes.io/role/elb" = "1" } } + +# Create route table for NAT Gateway resource "aws_route_table" "main" { vpc_id = local.vpc_id route { @@ -70,16 +62,17 @@ resource "aws_route_table" "main" { Name = format("%s-eks-rt-%s", local.project_prefix, local.build_suffix) } } + +# Associate internal subnets with the route table resource "aws_route_table_association" "internal-subnet-association" { - for_each = nonsensitive(toset(local.azs)) + for_each = toset(local.azs) subnet_id = aws_subnet.eks-internal[each.key].id route_table_id = aws_route_table.main.id } + +# Associate external subnets with the main route table resource "aws_route_table_association" "external-subnet-association" { - for_each = nonsensitive(toset(local.azs)) + for_each = toset(local.azs) subnet_id = aws_subnet.eks-external[each.key].id route_table_id = local.vpc_main_route_table_id -} - - - +} \ No newline at end of file diff --git a/eks-cluster/outputs.tf b/eks-cluster/outputs.tf index 7e7d9364..e6b489c3 100644 --- a/eks-cluster/outputs.tf +++ b/eks-cluster/outputs.tf @@ -8,6 +8,16 @@ output "cluster_endpoint" { value = aws_eks_cluster.eks-tf.endpoint } + +output "oidc_provider_details" { + value = { + url = 
aws_eks_cluster.eks-tf.identity[0].oidc[0].issuer + issuer_url = local.oidc_issuer_url + provider_arn = aws_iam_openid_connect_provider.oidc.arn + account_id = data.aws_caller_identity.current.account_id + } +} + output "cluster_name" { description = "Kubernetes Cluster Name" value = nonsensitive(aws_eks_cluster.eks-tf.name) @@ -23,3 +33,11 @@ output "node_security_group_id" { value = aws_security_group.eks_nodes.id } +output "aws_region" { + description = "The AWS region where the EKS cluster is deployed" + value = var.aws_region +} + +output "ebs_csi_driver_role_arn" { + value = aws_iam_role.ebs_csi_driver.arn +} \ No newline at end of file diff --git a/eks-cluster/variables.tf b/eks-cluster/variables.tf index 116fa3f2..3441659d 100644 --- a/eks-cluster/variables.tf +++ b/eks-cluster/variables.tf @@ -1,14 +1,16 @@ -#Project Globals +#Project Global variable "admin_src_addr" { type = string description = "Allowed Admin source IP prefix" default = "0.0.0.0/0" } -#TF Cloud -variable "tf_cloud_organization" { - type = string - description = "TF cloud org (Value set in TF cloud)" + +variable "aws_region" { + description = "The AWS region to deploy the EKS cluster" + type = string + default = "us-east-1" } + #AWS variable "eks_addons" { type = list(object({ @@ -18,29 +20,28 @@ variable "eks_addons" { default = [ { name = "kube-proxy" - version = "v1.25.6-eksbuild.1" + version = "v1.29.10-eksbuild.3" }, { name = "vpc-cni" - version = "v1.12.2-eksbuild.1" + version = "v1.19.0-eksbuild.1" }, { name = "coredns" - version = "v1.8.7-eksbuild.3" + version = "v1.11.1-eksbuild.4" }, { name = "aws-ebs-csi-driver" - version = "v1.13.0-eksbuild.3" + version = "v1.39.0-eksbuild.1" } ] } -variable "ssh_key" { - type = string - description = "Unneeded for EKS, only present for warning handling with TF cloud variable set" -} - - +variable "create_oidc_provider" { + description = "Whether to create OIDC provider for IRSA" + type = bool + default = true +} diff --git 
a/eks-cluster/versions.tf b/eks-cluster/versions.tf index eca6ade9..fcb35183 100644 --- a/eks-cluster/versions.tf +++ b/eks-cluster/versions.tf @@ -2,5 +2,10 @@ terraform { required_version = ">= 0.14.0" required_providers { aws = ">= 4" + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0" + } } -} \ No newline at end of file + +} diff --git a/infra/.gitattributes b/infra/.gitattributes new file mode 100644 index 00000000..21c23bce --- /dev/null +++ b/infra/.gitattributes @@ -0,0 +1 @@ +infra/.terraform/providers/registry.terraform.io/hashicorp/aws/5.91.0/darwin_arm64/terraform-provider-aws_v5.91.0_x5 filter=lfs diff=lfs merge=lfs -text diff --git a/infra/backend.tf b/infra/backend.tf new file mode 100644 index 00000000..0d775e52 --- /dev/null +++ b/infra/backend.tf @@ -0,0 +1,9 @@ +terraform { + backend "s3" { + bucket = "akash-terraform-state-bucket" # Your S3 bucket name + key = "infra/terraform.tfstate" # Path to state file + region = "us-east-1" # AWS region + dynamodb_table = "terraform-lock-table" # DynamoDB table for state locking + encrypt = true + } +} diff --git a/infra/network.tf b/infra/network.tf index 1f9bb8b6..f96b3471 100644 --- a/infra/network.tf +++ b/infra/network.tf @@ -1,73 +1,77 @@ -############################ VPC ############################ - -# Create VPC, subnets, route tables, and IGW -data "aws_availability_zones" "available" { - state = "available" -} - -locals { - valid_azs = [for az in data.aws_availability_zones.available.names : az if az != "us-west-1a"] +# Declare the random_id resource to generate a suffix +resource "random_id" "build_suffix" { + byte_length = 8 } +# VPC Module Configuration module "vpc" { - source = "terraform-aws-modules/vpc/aws" - version = ">= 4.0" - name = "${var.project_prefix}-vpc-${random_id.build_suffix.hex}" - cidr = var.cidr - azs = local.valid_azs + source = "terraform-aws-modules/vpc/aws" + version = "~> 5.19.0" # Correct version constraint + + name = 
"${var.project_prefix}-vpc-${random_id.build_suffix.hex}" + cidr = var.cidr + azs = var.azs + enable_dns_support = true enable_dns_hostnames = true + tags = { + Name = "${var.project_prefix}-vpc-${random_id.build_suffix.hex}" resource_owner = var.resource_owner - Name = "${var.project_prefix}-vpc-${random_id.build_suffix.hex}" } } +# Internet Gateway resource "aws_internet_gateway" "igw" { vpc_id = module.vpc.vpc_id - tags = { + tags = { Name = "${var.project_prefix}-igw-${random_id.build_suffix.hex}" } } -module subnet_addrs { - for_each = toset(local.valid_azs) - source = "hashicorp/subnets/cidr" - version = ">= 1.0.0" - base_cidr_block = cidrsubnet(module.vpc.vpc_cidr_block,4,index(local.valid_azs,each.key)) - networks = [ - { name = "management", new_bits = 8 }, - { name = "internal", new_bits = 6 }, - { name = "external", new_bits = 6 }, - { name = "app-cidr", new_bits = 4 } - ] -} - -resource "aws_subnet" "internal" { - for_each = toset(local.valid_azs) - vpc_id = module.vpc.vpc_id - cidr_block = module.subnet_addrs[each.key].network_cidr_blocks["internal"] +# Subnets +resource "aws_subnet" "management" { + for_each = toset(var.azs) + vpc_id = module.vpc.vpc_id + cidr_block = cidrsubnet(module.vpc.vpc_cidr_block, 4, index(var.azs, each.key) * 4) availability_zone = each.key - tags = { Name = format("%s-int-subnet-%s", var.project_prefix, each.key) } + tags = { + Name = format("%s-mgmt-subnet-%s", var.project_prefix, each.key) + } } -resource "aws_subnet" "management" { - for_each = toset(local.valid_azs) - vpc_id = module.vpc.vpc_id - cidr_block = module.subnet_addrs[each.key].network_cidr_blocks["management"] +resource "aws_subnet" "internal" { + for_each = toset(var.azs) + vpc_id = module.vpc.vpc_id + cidr_block = cidrsubnet(module.vpc.vpc_cidr_block, 4, index(var.azs, each.key) * 4 + 1) availability_zone = each.key - tags = { Name = format("%s-mgmt-subnet-%s", var.project_prefix, each.key) } + tags = { + Name = format("%s-int-subnet-%s", 
var.project_prefix, each.key) + } } resource "aws_subnet" "external" { - for_each = toset(local.valid_azs) - vpc_id = module.vpc.vpc_id - cidr_block = module.subnet_addrs[each.key].network_cidr_blocks["external"] + for_each = toset(var.azs) + vpc_id = module.vpc.vpc_id + cidr_block = cidrsubnet(module.vpc.vpc_cidr_block, 4, index(var.azs, each.key) * 4 + 2) map_public_ip_on_launch = true + availability_zone = each.key + tags = { + Name = format("%s-ext-subnet-%s", var.project_prefix, each.key) + } +} + +resource "aws_subnet" "app_cidr" { + for_each = toset(var.azs) + vpc_id = module.vpc.vpc_id + cidr_block = cidrsubnet(module.vpc.vpc_cidr_block, 4, index(var.azs, each.key) * 4 + 3) availability_zone = each.key - tags = { Name = format("%s-ext-subnet-%s", var.project_prefix, each.key) } + tags = { + Name = format("%s-app-subnet-%s", var.project_prefix, each.key) + } } +# Route Table resource "aws_route_table" "main" { vpc_id = module.vpc.vpc_id route { @@ -79,21 +83,27 @@ resource "aws_route_table" "main" { } } +# Route Table Associations resource "aws_route_table_association" "subnet-association-internal" { - for_each = toset(local.valid_azs) + for_each = toset(var.azs) subnet_id = aws_subnet.internal[each.key].id route_table_id = aws_route_table.main.id } resource "aws_route_table_association" "subnet-association-management" { - for_each = toset(local.valid_azs) + for_each = toset(var.azs) subnet_id = aws_subnet.management[each.key].id route_table_id = aws_route_table.main.id } resource "aws_route_table_association" "subnet-association-external" { - for_each = toset(local.valid_azs) + for_each = toset(var.azs) subnet_id = aws_subnet.external[each.key].id route_table_id = aws_route_table.main.id } +resource "aws_route_table_association" "subnet-association-app-cidr" { + for_each = toset(var.azs) + subnet_id = aws_subnet.app_cidr[each.key].id + route_table_id = aws_route_table.main.id +} \ No newline at end of file diff --git a/infra/outputs.tf b/infra/outputs.tf 
index a6c7d3a7..923e8384 100644 --- a/infra/outputs.tf +++ b/infra/outputs.tf @@ -1,100 +1,137 @@ -#Global +# Global Outputs output "project_prefix" { value = var.project_prefix } + output "resource_owner" { value = var.resource_owner } + output "build_suffix" { value = random_id.build_suffix.hex } -#Outputs + +# AWS Region and Availability Zones output "aws_region" { value = var.aws_region } + output "azs" { value = var.azs } + +# VPC Details output "vpc_cidr_block" { description = "CIDR Block" value = module.vpc.vpc_cidr_block } + output "vpc_id" { description = "VPC ID" value = module.vpc.vpc_id } + output "vpc_main_route_table_id" { - value = aws_route_table.main.id + value = aws_route_table.main.id # Reference the route table directly created } + +# Subnet Information output "public_subnet_ids" { - value = [values(aws_subnet.external)[0].id, values(aws_subnet.external)[1].id] + value = [for k, subnet in aws_subnet.external : subnet.id] # Reference the external subnets directly } -output "private_cidr_blocks" { - value = [values(aws_subnet.internal)[0].cidr_block, values(aws_subnet.internal)[1].cidr_block] + +output "private_subnet_ids" { + value = [for k, subnet in aws_subnet.internal : subnet.id] # Reference the internal subnets directly +} + +output "management_subnet_ids" { + value = [for k, subnet in aws_subnet.management : subnet.id] # Reference the management subnets directly } + output "public_cidr_blocks" { - value = [values(aws_subnet.external)[0].cidr_block, values(aws_subnet.external)[1].cidr_block] + value = [for k, subnet in aws_subnet.external : subnet.cidr_block] # Directly reference external subnets CIDR } -output "management_cidr_blocks" { - value = [values(aws_subnet.management)[0].cidr_block, values(aws_subnet.management)[1].cidr_block] + +output "private_cidr_blocks" { + value = [for k, subnet in aws_subnet.internal : subnet.cidr_block] # Directly reference internal subnets CIDR } -output "private_subnet_ids" { - value = 
[values(aws_subnet.internal)[0].id, values(aws_subnet.internal)[1].id] + +output "management_cidr_blocks" { + value = [for k, subnet in aws_subnet.management : subnet.cidr_block] # Directly reference management subnets CIDR } + +# Specific AZ Subnet CIDR Blocks output "public_az1_cidr_block" { -value = values(aws_subnet.external)[0].cidr_block + value = aws_subnet.external[element(tolist(var.azs), 0)].cidr_block # Reference AZ1's public CIDR } + output "private_az1_cidr_block" { -value = values(aws_subnet.internal)[0].cidr_block + value = aws_subnet.internal[element(tolist(var.azs), 0)].cidr_block # Reference AZ1's private CIDR } -output "app_cidr" { - description = "Application server(Juice Shop) CIDR block" - value = values(module.subnet_addrs)[0].network_cidr_blocks.app-cidr + +output "public_az2_cidr_block" { + value = aws_subnet.external[element(tolist(var.azs), 1)].cidr_block # Reference AZ2's public CIDR } -output "eks_cidr" { - description = "Application server(EKS) CIDR block" - value = values(module.subnet_addrs)[1].network_cidr_blocks.app-cidr + +output "private_az2_cidr_block" { + value = aws_subnet.internal[element(tolist(var.azs), 1)].cidr_block # Reference AZ2's private CIDR } +# Subnet IDs for specific AZs output "ext_subnet_az1" { - description = "ID of External subnet AZ1" - value = values(aws_subnet.external)[0].id + value = aws_subnet.external[element(tolist(var.azs), 0)].id # Reference AZ1's external subnet ID } + output "ext_subnet_az2" { - description = "ID of External subnet AZ2" - value = values(aws_subnet.external)[1].id + value = aws_subnet.external[element(tolist(var.azs), 1)].id # Reference AZ2's external subnet ID } + output "int_subnet_az1" { - description = "ID of Internal subnet AZ1" - value = values(aws_subnet.internal)[0].id + value = aws_subnet.internal[element(tolist(var.azs), 0)].id # Reference AZ1's internal subnet ID } + output "int_subnet_az2" { - description = "ID of Internal subnet AZ2" - value = 
values(aws_subnet.internal)[1].id + value = aws_subnet.internal[element(tolist(var.azs), 1)].id # Reference AZ2's internal subnet ID } + output "mgmt_subnet_az1" { - description = "ID of Management subnet AZ1" - value = values(aws_subnet.management)[0].id + value = aws_subnet.management[element(tolist(var.azs), 0)].id # Reference AZ1's management subnet ID } + output "mgmt_subnet_az2" { - description = "ID of Management subnet AZ2" - value = values(aws_subnet.management)[1].id + value = aws_subnet.management[element(tolist(var.azs), 1)].id # Reference AZ2's management subnet ID +} + +# CIDR Block for Application and EKS Subnets +output "app_cidr" { + description = "Application server (Juice Shop) CIDR block" + value = [for k, subnet in aws_subnet.app_cidr : subnet.cidr_block] # Direct reference to app subnets +} + +output "eks_cidr" { + description = "EKS server CIDR block" + value = [cidrsubnet(module.vpc.vpc_cidr_block, 4, 15)] # Dedicated CIDR block for EKS } +# Security Groups output "external_sg_id" { - value = aws_security_group.external.id + value = aws_security_group.external.id } + output "management_sg_id" { - value = aws_security_group.management.id + value = aws_security_group.management.id } + output "internal_sg_id" { - value = aws_security_group.internal.id + value = aws_security_group.internal.id } -output "nap" { - value = var.nap -} -output "nic" { - value = var.nic -} + + + + + + + + diff --git a/infra/provider.tf b/infra/provider.tf new file mode 100644 index 00000000..0996a6ed --- /dev/null +++ b/infra/provider.tf @@ -0,0 +1,5 @@ +# AWS Provider Configuration +provider "aws" { + region = var.aws_region +} + diff --git a/infra/terraform.tfvars b/infra/terraform.tfvars index ee55ed2c..970ff4f3 100644 --- a/infra/terraform.tfvars +++ b/infra/terraform.tfvars @@ -1,8 +1,6 @@ -project_prefix = "aws-automation" +project_prefix = "aws-akash" resource_owner = "akash" aws_region = "us-east-1" azs = ["us-east-1a", "us-east-1b"] -# Set the following 
feature flags for this use case -nic = false -nap = true + diff --git a/infra/variables.tf b/infra/variables.tf index 37b27367..958545ea 100644 --- a/infra/variables.tf +++ b/infra/variables.tf @@ -1,10 +1,10 @@ -# Variables - +# Variables.tf variable "project_prefix" { type = string # default = "demo" description = "This value is inserted at the beginning of each AWS object (alpha-numeric, no special character)" } + variable "aws_region" { description = "aws region" type = string @@ -15,22 +15,16 @@ variable "resource_owner" { description = "owner of the deployment, for tagging purposes" default = "myName" } -variable "tf_cloud_organization" { - type = string - description = "TF cloud org (Value set in TF cloud)" -} -variable "ssh_key" { + +variable "cidr" { + description = "The CIDR block for the Virtual Private Cloud (VPC) of the deployment" + default = "10.0.0.0/16" # Updated to a larger CIDR block type = string - description = "key used for authentication in ssh-rsa format" -} -variable cidr { - description = "the CIDR block for the Virtual Private Cloud (VPC) of the deployment" - default = "10.0.0.0/16" - type = string validation { - condition = can(regex("^([0-9]{1,3}.){3}[0-9]{1,3}($|/(16|24))$",var.cidr)) + condition = can(regex("^([0-9]{1,3}.){3}[0-9]{1,3}($|/(15|16|24))$", var.cidr)) error_message = "The value must conform to a CIDR block format." 
} + } variable "azs" { description = "Availability Zones" @@ -61,10 +55,5 @@ variable "int_address_prefixes" { default = ["10.1.20.0/24", "10.1.120.0/24"] description = "Internal subnet address prefixes" } -variable "nap" { - type = bool -} -variable "nic" { - type = bool -} + diff --git a/infra/versions.tf b/infra/versions.tf index e12333b1..fe90661a 100644 --- a/infra/versions.tf +++ b/infra/versions.tf @@ -1,6 +1,15 @@ terraform { - required_version = ">= 0.14.0" - required_providers { - aws = ">= 4" + required_version = ">= 1.0.0" + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 5.0" + } + random = { + source = "hashicorp/random" + version = ">= 3.5" + } + + } } -} + \ No newline at end of file diff --git a/nap/backend.tf b/nap/backend.tf new file mode 100644 index 00000000..f6aecda7 --- /dev/null +++ b/nap/backend.tf @@ -0,0 +1,9 @@ +terraform { + backend "s3" { + bucket = "akash-terraform-state-bucket" # Your S3 bucket name + key = "nap/terraform.tfstate" # Path to state file + region = "us-east-1" # AWS region + dynamodb_table = "terraform-lock-table" # DynamoDB table for state locking + encrypt = true # Encrypt state file at rest + } +} \ No newline at end of file diff --git a/nap/charts/nginx-app-protect/values.yaml b/nap/charts/nginx-app-protect/values.yaml index 6ba69f90..633afb9a 100644 --- a/nap/charts/nginx-app-protect/values.yaml +++ b/nap/charts/nginx-app-protect/values.yaml @@ -1,69 +1,60 @@ --- controller: + name: controller + kind: deployment + nginxplus: true + mgmt: + licenseTokenSecretName: "license-token" + sslVerify: false + nginxReloadTimeout: 60000 appprotect: enable: true v5: true - volumes: - - name: app-protect-bd-config - emptyDir: {} - - name: app-protect-config - emptyDir: {} - - name: app-protect-bundles - emptyDir: {} - - volumeMounts: - - name: app-protect-bd-config - mountPath: /etc/app_protect/bd-config - - name: app-protect-config - mountPath: /etc/app_protect/config - - name: app-protect-bundles - 
mountPath: /etc/app_protect/bundles - ## Configuration for App Protect WAF v5 Enforcer enforcer: - # Host that the App Protect WAF v5 Enforcer runs on. - # This will normally be "127.0.0.1" as the Enforcer container - # will run in the same pod as the Ingress Controller container. host: "127.0.0.1" - # Port that the App Protect WAF v5 Enforcer runs on. - port: 50000 + port: 50000 image: - ## The image repository of the App Protect WAF v5 Enforcer. - repository: private-registry.nginx.com/nap/waf-enforcer - tag: "3.6.1" - ## The pull policy for the App Protect WAF v5 Enforcer image. + repository: private-registry.nginx.com/nap/waf-enforcer + tag: "5.4.0" pullPolicy: IfNotPresent - securityContext: - readOnlyRootFilesystem: true + securityContext: + readOnlyRootFilesystem: false # Temporarily disabled for debugging + allowPrivilegeEscalation: true + runAsNonRoot: false configManager: image: - ## The image repository of the App Protect WAF v5 Configuration Manager. repository: private-registry.nginx.com/nap/waf-config-mgr - ## The tag of the App Protect WAF v5 Configuration Manager image. - tag: "3.6.1" - ## The pull policy for the App Protect WAF v5 Configuration Manager image. 
+ tag: "5.4.0" pullPolicy: IfNotPresent securityContext: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - runAsUser: 101 #nginx - runAsNonRoot: true + readOnlyRootFilesystem: false # Temporarily disabled for debugging + allowPrivilegeEscalation: true + runAsNonRoot: false capabilities: drop: - all - appprotectdos: - enable: true + volumeMounts: + - name: app-protect-bd-config + mountPath: /opt/app_protect/bd_config + - name: app-protect-config + mountPath: /opt/app_protect/config + - name: app-protect-bundles + mountPath: /etc/app_protect/bundles enableSnippets: true image: - repository: private-registry.nginx.com/nginx-ic-dos/nginx-plus-ingress - tag: "3.6.1" - pullPolicy: Always - nginxplus: true + repository: private-registry.nginx.com/nginx-ic-nap-v5/nginx-plus-ingress + tag: "4.0.1" + pullPolicy: IfNotPresent + securityContext: + readOnlyRootFilesystem: false # Temporarily disabled for debugging + allowPrivilegeEscalation: true + logLevel: "debug" # Increased for debugging nginxStatus: - allowCidrs: 0.0.0.0/0 + allowCidrs: "0.0.0.0/0" port: 9000 readyStatus: initialDelaySeconds: 30 serviceAccount: - imagePullSecretName: regcred + imagePullSecretName: regcred prometheus: - create: true + create: true \ No newline at end of file diff --git a/nap/charts/prometheus/values.yaml b/nap/charts/prometheus/values.yaml index 1142f92a..7891baa1 100644 --- a/nap/charts/prometheus/values.yaml +++ b/nap/charts/prometheus/values.yaml @@ -1,4 +1,11 @@ --- prometheus: pushgateway: - enabled: false \ No newline at end of file + enabled: false + server: + persistentVolume: + enabled: true + alertmanager: + persistentVolume: + enabled: true + diff --git a/nap/crd.tf b/nap/crd.tf new file mode 100644 index 00000000..a237510f --- /dev/null +++ b/nap/crd.tf @@ -0,0 +1,2168 @@ +resource "kubectl_manifest" "dnsendpoints_crd" { + yaml_body = <<YAML +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + 
controller-gen.kubebuilder.io/version: v0.17.1 + name: dnsendpoints.externaldns.nginx.org +spec: + group: externaldns.nginx.org + names: + kind: DNSEndpoint + listKind: DNSEndpointList + plural: dnsendpoints + singular: dnsendpoint + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: DNSEndpoint is the CRD wrapper for Endpoint + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DNSEndpointSpec holds information about endpoints. + properties: + endpoints: + items: + description: Endpoint describes DNS Endpoint. + properties: + dnsName: + description: The hostname for the DNS record + type: string + labels: + additionalProperties: + type: string + description: Labels stores labels defined for the Endpoint + type: object + providerSpecific: + description: ProviderSpecific stores provider specific config + items: + description: ProviderSpecificProperty represents provider + specific config property. + properties: + name: + description: Name of the property + type: string + value: + description: Value of the property + type: string + type: object + type: array + recordTTL: + description: TTL for the record + format: int64 + type: integer + recordType: + description: RecordType type of record, e.g. 
CNAME, A, SRV, + TXT, MX + type: string + targets: + description: The targets the DNS service points to + items: + type: string + type: array + type: object + type: array + type: object + status: + description: DNSEndpointStatus represents generation observed by the external + dns controller. + properties: + observedGeneration: + description: The generation observed by by the external-dns controller. + format: int64 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} +YAML +} +resource "kubectl_manifest" "globalconfigurations_crd" { + yaml_body = <<YAML +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.1 + name: globalconfigurations.k8s.nginx.org +spec: + group: k8s.nginx.org + names: + kind: GlobalConfiguration + listKind: GlobalConfigurationList + plural: globalconfigurations + shortNames: + - gc + singular: globalconfiguration + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: GlobalConfiguration defines the GlobalConfiguration resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GlobalConfigurationSpec is the spec of the GlobalConfiguration + resource. 
+ properties: + listeners: + items: + description: Listener defines a listener. + properties: + ipv4: + type: string + ipv6: + type: string + name: + type: string + port: + type: integer + protocol: + type: string + ssl: + type: boolean + type: object + type: array + type: object + type: object + served: true + storage: true +YAML +} +resource "kubectl_manifest" "policies_crd" { + yaml_body = <<YAML +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.1 + name: policies.k8s.nginx.org +spec: + group: k8s.nginx.org + names: + kind: Policy + listKind: PolicyList + plural: policies + shortNames: + - pol + singular: policy + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Current state of the Policy. If the resource has a valid status, + it means it has been validated and accepted by the Ingress Controller. + jsonPath: .status.state + name: State + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: Policy defines a Policy for VirtualServer and VirtualServerRoute + resources. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + PolicySpec is the spec of the Policy resource. 
+ The spec includes multiple fields, where each field represents a different policy. + Only one policy (field) is allowed. + properties: + accessControl: + description: AccessControl defines an access policy based on the source + IP of a request. + properties: + allow: + items: + type: string + type: array + deny: + items: + type: string + type: array + type: object + apiKey: + description: APIKey defines an API Key policy. + properties: + clientSecret: + type: string + suppliedIn: + description: SuppliedIn defines the locations API Key should be + supplied in. + properties: + header: + items: + type: string + type: array + query: + items: + type: string + type: array + type: object + type: object + basicAuth: + description: BasicAuth holds HTTP Basic authentication configuration + properties: + realm: + type: string + secret: + type: string + type: object + egressMTLS: + description: EgressMTLS defines an Egress MTLS policy. + properties: + ciphers: + type: string + protocols: + type: string + serverName: + type: boolean + sessionReuse: + type: boolean + sslName: + type: string + tlsSecret: + type: string + trustedCertSecret: + type: string + verifyDepth: + type: integer + verifyServer: + type: boolean + type: object + ingressClassName: + type: string + ingressMTLS: + description: IngressMTLS defines an Ingress MTLS policy. + properties: + clientCertSecret: + type: string + crlFileName: + type: string + verifyClient: + type: string + verifyDepth: + type: integer + type: object + jwt: + description: JWTAuth holds JWT authentication configuration. + properties: + jwksURI: + type: string + keyCache: + type: string + realm: + type: string + secret: + type: string + token: + type: string + type: object + oidc: + description: OIDC defines an Open ID Connect policy. 
+ properties: + accessTokenEnable: + type: boolean + authEndpoint: + type: string + authExtraArgs: + items: + type: string + type: array + clientID: + type: string + clientSecret: + type: string + endSessionEndpoint: + type: string + jwksURI: + type: string + postLogoutRedirectURI: + type: string + redirectURI: + type: string + scope: + type: string + tokenEndpoint: + type: string + zoneSyncLeeway: + type: integer + type: object + rateLimit: + description: RateLimit defines a rate limit policy. + properties: + burst: + type: integer + delay: + type: integer + dryRun: + type: boolean + key: + type: string + logLevel: + type: string + noDelay: + type: boolean + rate: + type: string + rejectCode: + type: integer + scale: + type: boolean + zoneSize: + type: string + type: object + waf: + description: WAF defines an WAF policy. + properties: + apBundle: + type: string + apPolicy: + type: string + enable: + type: boolean + securityLog: + description: SecurityLog defines the security log of a WAF policy. + properties: + apLogBundle: + type: string + apLogConf: + type: string + enable: + type: boolean + logDest: + type: string + type: object + securityLogs: + items: + description: SecurityLog defines the security log of a WAF policy. 
+ properties: + apLogBundle: + type: string + apLogConf: + type: string + enable: + type: boolean + logDest: + type: string + type: object + type: array + type: object + type: object + status: + description: PolicyStatus is the status of the policy resource + properties: + message: + type: string + reason: + type: string + state: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +YAML +} + +resource "kubectl_manifest" "transportservers_crd" { + yaml_body = <<YAML +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.1 + name: transportservers.k8s.nginx.org +spec: + group: k8s.nginx.org + names: + kind: TransportServer + listKind: TransportServerList + plural: transportservers + shortNames: + - ts + singular: transportserver + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Current state of the TransportServer. If the resource has a valid + status, it means it has been validated and accepted by the Ingress Controller. + jsonPath: .status.state + name: State + type: string + - jsonPath: .status.reason + name: Reason + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: TransportServer defines the TransportServer resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TransportServerSpec is the spec of the TransportServer resource. + properties: + action: + description: TransportServerAction defines an action. + properties: + pass: + type: string + type: object + host: + type: string + ingressClassName: + type: string + listener: + description: TransportServerListener defines a listener for a TransportServer. + properties: + name: + type: string + protocol: + type: string + type: object + serverSnippets: + type: string + sessionParameters: + description: SessionParameters defines session parameters. + properties: + timeout: + type: string + type: object + streamSnippets: + type: string + tls: + description: TransportServerTLS defines TransportServerTLS configuration + for a TransportServer. + properties: + secret: + type: string + type: object + upstreamParameters: + description: UpstreamParameters defines parameters for an upstream. + properties: + connectTimeout: + type: string + nextUpstream: + type: boolean + nextUpstreamTimeout: + type: string + nextUpstreamTries: + type: integer + udpRequests: + type: integer + udpResponses: + type: integer + type: object + upstreams: + items: + description: TransportServerUpstream defines an upstream. + properties: + backup: + type: string + backupPort: + type: integer + failTimeout: + type: string + healthCheck: + description: TransportServerHealthCheck defines the parameters + for active Upstream HealthChecks. + properties: + enable: + type: boolean + fails: + type: integer + interval: + type: string + jitter: + type: string + match: + description: TransportServerMatch defines the parameters + of a custom health check. 
+ properties: + expect: + type: string + send: + type: string + type: object + passes: + type: integer + port: + type: integer + timeout: + type: string + type: object + loadBalancingMethod: + type: string + maxConns: + type: integer + maxFails: + type: integer + name: + type: string + port: + type: integer + service: + type: string + type: object + type: array + type: object + status: + description: TransportServerStatus defines the status for the TransportServer + resource. + properties: + message: + type: string + reason: + type: string + state: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +YAML +} +resource "kubectl_manifest" "virtualserverroute_crd" { + yaml_body = <<YAML +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.1 + name: virtualserverroutes.k8s.nginx.org +spec: + group: k8s.nginx.org + names: + kind: VirtualServerRoute + listKind: VirtualServerRouteList + plural: virtualserverroutes + shortNames: + - vsr + singular: virtualserverroute + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Current state of the VirtualServerRoute. If the resource has a + valid status, it means it has been validated and accepted by the Ingress Controller. + jsonPath: .status.state + name: State + type: string + - jsonPath: .spec.host + name: Host + type: string + - jsonPath: .status.externalEndpoints[*].ip + name: IP + type: string + - jsonPath: .status.externalEndpoints[*].hostname + name: ExternalHostname + priority: 1 + type: string + - jsonPath: .status.externalEndpoints[*].ports + name: Ports + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VirtualServerRoute defines the VirtualServerRoute resource. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VirtualServerRouteSpec is the spec of the VirtualServerRoute + resource. + properties: + host: + type: string + ingressClassName: + type: string + subroutes: + items: + description: Route defines a route. + properties: + action: + description: Action defines an action. + properties: + pass: + type: string + proxy: + description: ActionProxy defines a proxy in an Action. + properties: + requestHeaders: + description: ProxyRequestHeaders defines the request + headers manipulation in an ActionProxy. + properties: + pass: + type: boolean + set: + items: + description: Header defines an HTTP Header. + properties: + name: + type: string + value: + type: string + type: object + type: array + type: object + responseHeaders: + description: ProxyResponseHeaders defines the response + headers manipulation in an ActionProxy. + properties: + add: + items: + description: AddHeader defines an HTTP Header + with an optional Always field to use with the + add_header NGINX directive. 
+ properties: + always: + type: boolean + name: + type: string + value: + type: string + type: object + type: array + hide: + items: + type: string + type: array + ignore: + items: + type: string + type: array + pass: + items: + type: string + type: array + type: object + rewritePath: + type: string + upstream: + type: string + type: object + redirect: + description: ActionRedirect defines a redirect in an Action. + properties: + code: + type: integer + url: + type: string + type: object + return: + description: ActionReturn defines a return in an Action. + properties: + body: + type: string + code: + type: integer + headers: + items: + description: Header defines an HTTP Header. + properties: + name: + type: string + value: + type: string + type: object + type: array + type: + type: string + type: object + type: object + dos: + type: string + errorPages: + items: + description: ErrorPage defines an ErrorPage in a Route. + properties: + codes: + items: + type: integer + type: array + redirect: + description: ErrorPageRedirect defines a redirect for + an ErrorPage. + properties: + code: + type: integer + url: + type: string + type: object + return: + description: ErrorPageReturn defines a return for an ErrorPage. + properties: + body: + type: string + code: + type: integer + headers: + items: + description: Header defines an HTTP Header. + properties: + name: + type: string + value: + type: string + type: object + type: array + type: + type: string + type: object + type: object + type: array + location-snippets: + type: string + matches: + items: + description: Match defines a match. + properties: + action: + description: Action defines an action. + properties: + pass: + type: string + proxy: + description: ActionProxy defines a proxy in an Action. + properties: + requestHeaders: + description: ProxyRequestHeaders defines the request + headers manipulation in an ActionProxy. 
+ properties: + pass: + type: boolean + set: + items: + description: Header defines an HTTP Header. + properties: + name: + type: string + value: + type: string + type: object + type: array + type: object + responseHeaders: + description: ProxyResponseHeaders defines the + response headers manipulation in an ActionProxy. + properties: + add: + items: + description: AddHeader defines an HTTP Header + with an optional Always field to use with + the add_header NGINX directive. + properties: + always: + type: boolean + name: + type: string + value: + type: string + type: object + type: array + hide: + items: + type: string + type: array + ignore: + items: + type: string + type: array + pass: + items: + type: string + type: array + type: object + rewritePath: + type: string + upstream: + type: string + type: object + redirect: + description: ActionRedirect defines a redirect in + an Action. + properties: + code: + type: integer + url: + type: string + type: object + return: + description: ActionReturn defines a return in an Action. + properties: + body: + type: string + code: + type: integer + headers: + items: + description: Header defines an HTTP Header. + properties: + name: + type: string + value: + type: string + type: object + type: array + type: + type: string + type: object + type: object + conditions: + items: + description: Condition defines a condition in a MatchRule. + properties: + argument: + type: string + cookie: + type: string + header: + type: string + value: + type: string + variable: + type: string + type: object + type: array + splits: + items: + description: Split defines a split. + properties: + action: + description: Action defines an action. + properties: + pass: + type: string + proxy: + description: ActionProxy defines a proxy in + an Action. + properties: + requestHeaders: + description: ProxyRequestHeaders defines + the request headers manipulation in an + ActionProxy. 
+ properties: + pass: + type: boolean + set: + items: + description: Header defines an HTTP + Header. + properties: + name: + type: string + value: + type: string + type: object + type: array + type: object + responseHeaders: + description: ProxyResponseHeaders defines + the response headers manipulation in an + ActionProxy. + properties: + add: + items: + description: AddHeader defines an + HTTP Header with an optional Always + field to use with the add_header + NGINX directive. + properties: + always: + type: boolean + name: + type: string + value: + type: string + type: object + type: array + hide: + items: + type: string + type: array + ignore: + items: + type: string + type: array + pass: + items: + type: string + type: array + type: object + rewritePath: + type: string + upstream: + type: string + type: object + redirect: + description: ActionRedirect defines a redirect + in an Action. + properties: + code: + type: integer + url: + type: string + type: object + return: + description: ActionReturn defines a return in + an Action. + properties: + body: + type: string + code: + type: integer + headers: + items: + description: Header defines an HTTP Header. + properties: + name: + type: string + value: + type: string + type: object + type: array + type: + type: string + type: object + type: object + weight: + type: integer + type: object + type: array + type: object + type: array + path: + type: string + policies: + items: + description: PolicyReference references a policy by name and + an optional namespace. + properties: + name: + type: string + namespace: + type: string + type: object + type: array + route: + type: string + splits: + items: + description: Split defines a split. + properties: + action: + description: Action defines an action. + properties: + pass: + type: string + proxy: + description: ActionProxy defines a proxy in an Action. 
+ properties: + requestHeaders: + description: ProxyRequestHeaders defines the request + headers manipulation in an ActionProxy. + properties: + pass: + type: boolean + set: + items: + description: Header defines an HTTP Header. + properties: + name: + type: string + value: + type: string + type: object + type: array + type: object + responseHeaders: + description: ProxyResponseHeaders defines the + response headers manipulation in an ActionProxy. + properties: + add: + items: + description: AddHeader defines an HTTP Header + with an optional Always field to use with + the add_header NGINX directive. + properties: + always: + type: boolean + name: + type: string + value: + type: string + type: object + type: array + hide: + items: + type: string + type: array + ignore: + items: + type: string + type: array + pass: + items: + type: string + type: array + type: object + rewritePath: + type: string + upstream: + type: string + type: object + redirect: + description: ActionRedirect defines a redirect in + an Action. + properties: + code: + type: integer + url: + type: string + type: object + return: + description: ActionReturn defines a return in an Action. + properties: + body: + type: string + code: + type: integer + headers: + items: + description: Header defines an HTTP Header. + properties: + name: + type: string + value: + type: string + type: object + type: array + type: + type: string + type: object + type: object + weight: + type: integer + type: object + type: array + type: object + type: array + upstreams: + items: + description: Upstream defines an upstream. + properties: + backup: + type: string + backupPort: + type: integer + buffer-size: + type: string + buffering: + type: boolean + buffers: + description: UpstreamBuffers defines Buffer Configuration for + an Upstream. 
+ properties: + number: + type: integer + size: + type: string + type: object + client-max-body-size: + type: string + connect-timeout: + type: string + fail-timeout: + type: string + healthCheck: + description: HealthCheck defines the parameters for active Upstream + HealthChecks. + properties: + connect-timeout: + type: string + enable: + type: boolean + fails: + type: integer + grpcService: + type: string + grpcStatus: + type: integer + headers: + items: + description: Header defines an HTTP Header. + properties: + name: + type: string + value: + type: string + type: object + type: array + interval: + type: string + jitter: + type: string + keepalive-time: + type: string + mandatory: + type: boolean + passes: + type: integer + path: + type: string + persistent: + type: boolean + port: + type: integer + read-timeout: + type: string + send-timeout: + type: string + statusMatch: + type: string + tls: + description: UpstreamTLS defines a TLS configuration for + an Upstream. + properties: + enable: + type: boolean + type: object + type: object + keepalive: + type: integer + lb-method: + type: string + max-conns: + type: integer + max-fails: + type: integer + name: + type: string + next-upstream: + type: string + next-upstream-timeout: + type: string + next-upstream-tries: + type: integer + ntlm: + type: boolean + port: + type: integer + queue: + description: UpstreamQueue defines Queue Configuration for an + Upstream. + properties: + size: + type: integer + timeout: + type: string + type: object + read-timeout: + type: string + send-timeout: + type: string + service: + type: string + sessionCookie: + description: SessionCookie defines the parameters for session + persistence. 
+ properties: + domain: + type: string + enable: + type: boolean + expires: + type: string + httpOnly: + type: boolean + name: + type: string + path: + type: string + samesite: + type: string + secure: + type: boolean + type: object + slow-start: + type: string + subselector: + additionalProperties: + type: string + type: object + tls: + description: UpstreamTLS defines a TLS configuration for an + Upstream. + properties: + enable: + type: boolean + type: object + type: + type: string + use-cluster-ip: + type: boolean + type: object + type: array + type: object + status: + description: VirtualServerRouteStatus defines the status for the VirtualServerRoute + resource. + properties: + externalEndpoints: + items: + description: ExternalEndpoint defines the IP/ Hostname and ports + used to connect to this resource. + properties: + hostname: + type: string + ip: + type: string + ports: + type: string + type: object + type: array + message: + type: string + reason: + type: string + referencedBy: + type: string + state: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +YAML +} +resource "kubectl_manifest" "virtualserver_crd" { + yaml_body = <<YAML +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.1 + name: virtualservers.k8s.nginx.org +spec: + group: k8s.nginx.org + names: + kind: VirtualServer + listKind: VirtualServerList + plural: virtualservers + shortNames: + - vs + singular: virtualserver + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Current state of the VirtualServer. If the resource has a valid + status, it means it has been validated and accepted by the Ingress Controller. 
+ jsonPath: .status.state + name: State + type: string + - jsonPath: .spec.host + name: Host + type: string + - jsonPath: .status.externalEndpoints[*].ip + name: IP + type: string + - jsonPath: .status.externalEndpoints[*].hostname + name: ExternalHostname + priority: 1 + type: string + - jsonPath: .status.externalEndpoints[*].ports + name: Ports + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VirtualServer defines the VirtualServer resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VirtualServerSpec is the spec of the VirtualServer resource. + properties: + dos: + type: string + externalDNS: + description: ExternalDNS defines externaldns sub-resource of a virtual + server. + properties: + enable: + type: boolean + labels: + additionalProperties: + type: string + description: Labels stores labels defined for the Endpoint + type: object + providerSpecific: + description: ProviderSpecific stores provider specific config + items: + description: |- + ProviderSpecificProperty defines specific property + for using with ExternalDNS sub-resource. 
+ properties: + name: + description: Name of the property + type: string + value: + description: Value of the property + type: string + type: object + type: array + recordTTL: + description: TTL for the record + format: int64 + type: integer + recordType: + type: string + type: object + gunzip: + type: boolean + host: + type: string + http-snippets: + type: string + ingressClassName: + type: string + internalRoute: + description: InternalRoute allows for the configuration of internal + routing. + type: boolean + listener: + description: VirtualServerListener references a custom http and/or + https listener defined in GlobalConfiguration. + properties: + http: + type: string + https: + type: string + type: object + policies: + items: + description: PolicyReference references a policy by name and an + optional namespace. + properties: + name: + type: string + namespace: + type: string + type: object + type: array + routes: + items: + description: Route defines a route. + properties: + action: + description: Action defines an action. + properties: + pass: + type: string + proxy: + description: ActionProxy defines a proxy in an Action. + properties: + requestHeaders: + description: ProxyRequestHeaders defines the request + headers manipulation in an ActionProxy. + properties: + pass: + type: boolean + set: + items: + description: Header defines an HTTP Header. + properties: + name: + type: string + value: + type: string + type: object + type: array + type: object + responseHeaders: + description: ProxyResponseHeaders defines the response + headers manipulation in an ActionProxy. + properties: + add: + items: + description: AddHeader defines an HTTP Header + with an optional Always field to use with the + add_header NGINX directive. 
+ properties: + always: + type: boolean + name: + type: string + value: + type: string + type: object + type: array + hide: + items: + type: string + type: array + ignore: + items: + type: string + type: array + pass: + items: + type: string + type: array + type: object + rewritePath: + type: string + upstream: + type: string + type: object + redirect: + description: ActionRedirect defines a redirect in an Action. + properties: + code: + type: integer + url: + type: string + type: object + return: + description: ActionReturn defines a return in an Action. + properties: + body: + type: string + code: + type: integer + headers: + items: + description: Header defines an HTTP Header. + properties: + name: + type: string + value: + type: string + type: object + type: array + type: + type: string + type: object + type: object + dos: + type: string + errorPages: + items: + description: ErrorPage defines an ErrorPage in a Route. + properties: + codes: + items: + type: integer + type: array + redirect: + description: ErrorPageRedirect defines a redirect for + an ErrorPage. + properties: + code: + type: integer + url: + type: string + type: object + return: + description: ErrorPageReturn defines a return for an ErrorPage. + properties: + body: + type: string + code: + type: integer + headers: + items: + description: Header defines an HTTP Header. + properties: + name: + type: string + value: + type: string + type: object + type: array + type: + type: string + type: object + type: object + type: array + location-snippets: + type: string + matches: + items: + description: Match defines a match. + properties: + action: + description: Action defines an action. + properties: + pass: + type: string + proxy: + description: ActionProxy defines a proxy in an Action. + properties: + requestHeaders: + description: ProxyRequestHeaders defines the request + headers manipulation in an ActionProxy. 
+ properties: + pass: + type: boolean + set: + items: + description: Header defines an HTTP Header. + properties: + name: + type: string + value: + type: string + type: object + type: array + type: object + responseHeaders: + description: ProxyResponseHeaders defines the + response headers manipulation in an ActionProxy. + properties: + add: + items: + description: AddHeader defines an HTTP Header + with an optional Always field to use with + the add_header NGINX directive. + properties: + always: + type: boolean + name: + type: string + value: + type: string + type: object + type: array + hide: + items: + type: string + type: array + ignore: + items: + type: string + type: array + pass: + items: + type: string + type: array + type: object + rewritePath: + type: string + upstream: + type: string + type: object + redirect: + description: ActionRedirect defines a redirect in + an Action. + properties: + code: + type: integer + url: + type: string + type: object + return: + description: ActionReturn defines a return in an Action. + properties: + body: + type: string + code: + type: integer + headers: + items: + description: Header defines an HTTP Header. + properties: + name: + type: string + value: + type: string + type: object + type: array + type: + type: string + type: object + type: object + conditions: + items: + description: Condition defines a condition in a MatchRule. + properties: + argument: + type: string + cookie: + type: string + header: + type: string + value: + type: string + variable: + type: string + type: object + type: array + splits: + items: + description: Split defines a split. + properties: + action: + description: Action defines an action. + properties: + pass: + type: string + proxy: + description: ActionProxy defines a proxy in + an Action. + properties: + requestHeaders: + description: ProxyRequestHeaders defines + the request headers manipulation in an + ActionProxy. 
+ properties: + pass: + type: boolean + set: + items: + description: Header defines an HTTP + Header. + properties: + name: + type: string + value: + type: string + type: object + type: array + type: object + responseHeaders: + description: ProxyResponseHeaders defines + the response headers manipulation in an + ActionProxy. + properties: + add: + items: + description: AddHeader defines an + HTTP Header with an optional Always + field to use with the add_header + NGINX directive. + properties: + always: + type: boolean + name: + type: string + value: + type: string + type: object + type: array + hide: + items: + type: string + type: array + ignore: + items: + type: string + type: array + pass: + items: + type: string + type: array + type: object + rewritePath: + type: string + upstream: + type: string + type: object + redirect: + description: ActionRedirect defines a redirect + in an Action. + properties: + code: + type: integer + url: + type: string + type: object + return: + description: ActionReturn defines a return in + an Action. + properties: + body: + type: string + code: + type: integer + headers: + items: + description: Header defines an HTTP Header. + properties: + name: + type: string + value: + type: string + type: object + type: array + type: + type: string + type: object + type: object + weight: + type: integer + type: object + type: array + type: object + type: array + path: + type: string + policies: + items: + description: PolicyReference references a policy by name and + an optional namespace. + properties: + name: + type: string + namespace: + type: string + type: object + type: array + route: + type: string + splits: + items: + description: Split defines a split. + properties: + action: + description: Action defines an action. + properties: + pass: + type: string + proxy: + description: ActionProxy defines a proxy in an Action. 
+ properties: + requestHeaders: + description: ProxyRequestHeaders defines the request + headers manipulation in an ActionProxy. + properties: + pass: + type: boolean + set: + items: + description: Header defines an HTTP Header. + properties: + name: + type: string + value: + type: string + type: object + type: array + type: object + responseHeaders: + description: ProxyResponseHeaders defines the + response headers manipulation in an ActionProxy. + properties: + add: + items: + description: AddHeader defines an HTTP Header + with an optional Always field to use with + the add_header NGINX directive. + properties: + always: + type: boolean + name: + type: string + value: + type: string + type: object + type: array + hide: + items: + type: string + type: array + ignore: + items: + type: string + type: array + pass: + items: + type: string + type: array + type: object + rewritePath: + type: string + upstream: + type: string + type: object + redirect: + description: ActionRedirect defines a redirect in + an Action. + properties: + code: + type: integer + url: + type: string + type: object + return: + description: ActionReturn defines a return in an Action. + properties: + body: + type: string + code: + type: integer + headers: + items: + description: Header defines an HTTP Header. + properties: + name: + type: string + value: + type: string + type: object + type: array + type: + type: string + type: object + type: object + weight: + type: integer + type: object + type: array + type: object + type: array + server-snippets: + type: string + tls: + description: TLS defines TLS configuration for a VirtualServer. + properties: + cert-manager: + description: CertManager defines a cert manager config for a TLS. 
+ properties: + cluster-issuer: + type: string + common-name: + type: string + duration: + type: string + issue-temp-cert: + type: boolean + issuer: + type: string + issuer-group: + type: string + issuer-kind: + type: string + renew-before: + type: string + usages: + type: string + type: object + redirect: + description: TLSRedirect defines a redirect for a TLS. + properties: + basedOn: + type: string + code: + type: integer + enable: + type: boolean + type: object + secret: + type: string + type: object + upstreams: + items: + description: Upstream defines an upstream. + properties: + backup: + type: string + backupPort: + type: integer + buffer-size: + type: string + buffering: + type: boolean + buffers: + description: UpstreamBuffers defines Buffer Configuration for + an Upstream. + properties: + number: + type: integer + size: + type: string + type: object + client-max-body-size: + type: string + connect-timeout: + type: string + fail-timeout: + type: string + healthCheck: + description: HealthCheck defines the parameters for active Upstream + HealthChecks. + properties: + connect-timeout: + type: string + enable: + type: boolean + fails: + type: integer + grpcService: + type: string + grpcStatus: + type: integer + headers: + items: + description: Header defines an HTTP Header. + properties: + name: + type: string + value: + type: string + type: object + type: array + interval: + type: string + jitter: + type: string + keepalive-time: + type: string + mandatory: + type: boolean + passes: + type: integer + path: + type: string + persistent: + type: boolean + port: + type: integer + read-timeout: + type: string + send-timeout: + type: string + statusMatch: + type: string + tls: + description: UpstreamTLS defines a TLS configuration for + an Upstream. 
+ properties: + enable: + type: boolean + type: object + type: object + keepalive: + type: integer + lb-method: + type: string + max-conns: + type: integer + max-fails: + type: integer + name: + type: string + next-upstream: + type: string + next-upstream-timeout: + type: string + next-upstream-tries: + type: integer + ntlm: + type: boolean + port: + type: integer + queue: + description: UpstreamQueue defines Queue Configuration for an + Upstream. + properties: + size: + type: integer + timeout: + type: string + type: object + read-timeout: + type: string + send-timeout: + type: string + service: + type: string + sessionCookie: + description: SessionCookie defines the parameters for session + persistence. + properties: + domain: + type: string + enable: + type: boolean + expires: + type: string + httpOnly: + type: boolean + name: + type: string + path: + type: string + samesite: + type: string + secure: + type: boolean + type: object + slow-start: + type: string + subselector: + additionalProperties: + type: string + type: object + tls: + description: UpstreamTLS defines a TLS configuration for an + Upstream. + properties: + enable: + type: boolean + type: object + type: + type: string + use-cluster-ip: + type: boolean + type: object + type: array + type: object + status: + description: VirtualServerStatus defines the status for the VirtualServer + resource. + properties: + externalEndpoints: + items: + description: ExternalEndpoint defines the IP/ Hostname and ports + used to connect to this resource. 
+ properties: + hostname: + type: string + ip: + type: string + ports: + type: string + type: object + type: array + message: + type: string + reason: + type: string + state: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +YAML +} + diff --git a/nap/data.tf b/nap/data.tf index d83783f8..c59e43ce 100644 --- a/nap/data.tf +++ b/nap/data.tf @@ -1,17 +1,29 @@ -data "tfe_outputs" "infra" { - organization = var.tf_cloud_organization - workspace = "infra" +data "terraform_remote_state" "infra" { + backend = "s3" + config = { + bucket = "akash-terraform-state-bucket" # Your S3 bucket name + key = "infra/terraform.tfstate" # Path to infra's state file + region = "us-east-1" # AWS region + } } -data "tfe_outputs" "eks" { - organization = var.tf_cloud_organization - workspace = "eks" + +# Read eks state from S3 +data "terraform_remote_state" "eks" { + backend = "s3" + config = { + bucket = "akash-terraform-state-bucket" # Your S3 bucket name + key = "eks-cluster/terraform.tfstate" # Path to EKS state file + region = "us-east-1" # AWS region + } } + data "aws_eks_cluster_auth" "auth" { - name = data.tfe_outputs.eks.values.cluster_name + name = data.terraform_remote_state.eks.outputs.cluster_name } + data "kubernetes_service_v1" "nginx-service" { metadata { - name = try(format("%s-%s-controller", helm_release.nginx-plus-ingress.0.name, helm_release.nginx-plus-ingress.0.chart)) - namespace = try(helm_release.nginx-plus-ingress[0].namespace) + name = try(format("%s-%s-controller", helm_release.nginx-plus-ingress.name, helm_release.nginx-plus-ingress.chart)) + namespace = try(helm_release.nginx-plus-ingress.namespace) } } diff --git a/nap/locals.tf b/nap/locals.tf index 82ab28ec..bb55c215 100644 --- a/nap/locals.tf +++ b/nap/locals.tf @@ -1,9 +1,11 @@ locals { - project_prefix = data.tfe_outputs.infra.values.project_prefix - build_suffix = data.tfe_outputs.infra.values.build_suffix - aws_region = 
data.tfe_outputs.infra.values.aws_region - host = data.tfe_outputs.eks.values.cluster_endpoint - cluster_ca_certificate = data.tfe_outputs.eks.values.kubeconfig-certificate-authority-data - cluster_name = data.tfe_outputs.eks.values.cluster_name - app = format("%s-nap-%s", local.project_prefix, local.build_suffix) -} + project_prefix = data.terraform_remote_state.infra.outputs.project_prefix + build_suffix = data.terraform_remote_state.infra.outputs.build_suffix + cluster_name = data.terraform_remote_state.eks.outputs.cluster_name # Single definition + ebs_csi_driver_role_arn = data.terraform_remote_state.eks.outputs.ebs_csi_driver_role_arn + cluster_endpoint = data.terraform_remote_state.eks.outputs.cluster_endpoint + aws_region = data.terraform_remote_state.infra.outputs.aws_region + host = data.terraform_remote_state.eks.outputs.cluster_endpoint + cluster_ca_certificate = data.terraform_remote_state.eks.outputs.kubeconfig-certificate-authority-data + app = format("%s-nap-%s", local.project_prefix, local.build_suffix) +} \ No newline at end of file diff --git a/nap/main.tf b/nap/main.tf index 5e728fa3..3357bb44 100644 --- a/nap/main.tf +++ b/nap/main.tf @@ -1,21 +1,54 @@ provider "aws" { - region = local.aws_region + region = local.aws_region } + provider "kubernetes" { - host = local.host - cluster_ca_certificate = base64decode(local.cluster_ca_certificate) - token = data.aws_eks_cluster_auth.auth.token + host = data.terraform_remote_state.eks.outputs.cluster_endpoint + cluster_ca_certificate = base64decode(data.terraform_remote_state.eks.outputs.kubeconfig-certificate-authority-data) + token = data.aws_eks_cluster_auth.auth.token + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + args = [ + "eks", + "get-token", + "--cluster-name", + data.terraform_remote_state.eks.outputs.cluster_name + ] + } } + provider "helm" { - kubernetes { - host = local.host - cluster_ca_certificate = base64decode(local.cluster_ca_certificate) - token = 
data.aws_eks_cluster_auth.auth.token + kubernetes { + host = data.terraform_remote_state.eks.outputs.cluster_endpoint + cluster_ca_certificate = base64decode(data.terraform_remote_state.eks.outputs.kubeconfig-certificate-authority-data) + token = data.aws_eks_cluster_auth.auth.token + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + args = [ + "eks", + "get-token", + "--cluster-name", + data.terraform_remote_state.eks.outputs.cluster_name + ] } + } } + provider "kubectl" { - host = local.host - cluster_ca_certificate = base64decode(local.cluster_ca_certificate) - token = data.aws_eks_cluster_auth.auth.token - load_config_file = false + host = data.terraform_remote_state.eks.outputs.cluster_endpoint + cluster_ca_certificate = base64decode(data.terraform_remote_state.eks.outputs.kubeconfig-certificate-authority-data) + token = data.aws_eks_cluster_auth.auth.token + load_config_file = false + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + args = [ + "eks", + "get-token", + "--cluster-name", + data.terraform_remote_state.eks.outputs.cluster_name + ] + } } \ No newline at end of file diff --git a/nap/nap.tf b/nap/nap.tf index cfaa0c7e..7931f241 100644 --- a/nap/nap.tf +++ b/nap/nap.tf @@ -5,29 +5,10 @@ resource "helm_release" "nginx-plus-ingress" { version = "2.0.1" namespace = kubernetes_namespace.nginx-ingress.metadata[0].name values = [file("./charts/nginx-app-protect/values.yaml")] - timeout = 500 + timeout = 600 depends_on = [ - kubernetes_secret.docker-registry, - null_resource.nap_complete + kubernetes_secret.docker-registry ] } -resource "null_resource" "nap_complete" { - triggers = { - nap_complete = "true" - } -} - -resource "null_resource" "copy_compiled_policy" { - depends_on = [ - helm_release.nginx-plus-ingress # Ensure this runs after the Helm release - ] - - provisioner "local-exec" { - command = <<EOT - kubectl cp ${path.module}/nap/charts/compiled_policy.tgz 
nginx-ingress/${helm_release.nginx-plus-ingress.name}:/etc/app_protect/bundles/compiled_policy.tgz -n nginx-ingress - kubectl exec -n nginx-ingress ${helm_release.nginx-plus-ingress.name} -- ls -lh /etc/app_protect/bundles/ - EOT - } -} diff --git a/nap/outputs.tf b/nap/outputs.tf index ede86e5c..26bd9c41 100644 --- a/nap/outputs.tf +++ b/nap/outputs.tf @@ -7,7 +7,10 @@ output "external_port" { output "origin_source" { value = "nap" } + output "nap_deployment_name" { - value = try (helm_release.nginx-plus-ingress[0].name) + value = try (helm_release.nginx-plus-ingress.name) sensitive = true } + + diff --git a/nap/prometheus.tf b/nap/prometheus.tf index 2faf76e2..8866ca8e 100644 --- a/nap/prometheus.tf +++ b/nap/prometheus.tf @@ -1,7 +1,10 @@ resource "helm_release" "prometheus" { - name = format("%s-pro-%s", local.project_prefix, local.build_suffix) - repository = "https://prometheus-community.github.io/helm-charts" - chart = "prometheus" - namespace = kubernetes_namespace.monitoring.metadata[0].name - values = [file("./charts/prometheus/values.yaml")] + name = format("%s-pro-%s", local.project_prefix, local.build_suffix) + repository = "https://prometheus-community.github.io/helm-charts" + chart = "prometheus" + #version = "27.3.0" + namespace = kubernetes_namespace.monitoring.metadata[0].name + values = [file("./charts/prometheus/values.yaml")] + + } diff --git a/nap/secrets.tf b/nap/secrets.tf index 5fa2e9ce..6d315b6d 100644 --- a/nap/secrets.tf +++ b/nap/secrets.tf @@ -1,20 +1,33 @@ -resource "kubernetes_secret" "docker-registry" { - metadata { - name = "regcred" - namespace = kubernetes_namespace.nginx-ingress.metadata[0].name - } - - type = "kubernetes.io/dockerconfigjson" +resource "kubernetes_secret" "nginx_license" { + metadata { + name = "license-token" + namespace = kubernetes_namespace.nginx-ingress.metadata[0].name + } + + data = { + "license.jwt" = var.nginx_jwt + } - data = { - ".dockerconfigjson" = jsonencode({ - auths = { - "${var.nginx_registry}" 
= { - "username" = var.nginx_jwt - "password" = var.nginx_pwd - "auth" = base64encode("${var.nginx_jwt}:${var.nginx_pwd}") - } - } - }) - } + type = "nginx.com/license" } + +resource "kubernetes_secret" "docker-registry" { + metadata { + name = "regcred" + namespace = kubernetes_namespace.nginx-ingress.metadata[0].name + } + + type = "kubernetes.io/dockerconfigjson" + + data = { + ".dockerconfigjson" = jsonencode({ + auths = { + "${var.nginx_registry}" = { + "username" = var.nginx_jwt, # Use the JWT token as the username + "password" = "none", # Set password to "none" + "auth" = base64encode("${var.nginx_jwt}:none") # Encode "<jwt-token>:none" + } + } + }) + } +} \ No newline at end of file diff --git a/nap/storage.tf b/nap/storage.tf new file mode 100644 index 00000000..c2a7fa2f --- /dev/null +++ b/nap/storage.tf @@ -0,0 +1,16 @@ +resource "kubernetes_storage_class_v1" "aws_csi" { + metadata { + name = "ebs-sc" + annotations = { + "storageclass.kubernetes.io/is-default-class" = "true" + } + } + storage_provisioner = "ebs.csi.aws.com" + reclaim_policy = "Delete" + parameters = { + type = "gp3" + fsType = "ext4" + } + allow_volume_expansion = true + volume_binding_mode = "WaitForFirstConsumer" +} diff --git a/nap/variables.tf b/nap/variables.tf index bbe59469..f2318eb7 100644 --- a/nap/variables.tf +++ b/nap/variables.tf @@ -1,9 +1,3 @@ -# Terraform Cloud Organization -variable "tf_cloud_organization" { - type = string - description = "Terraform Cloud Organization (set in GitHub secrets)" -} - # NGINX Configuration variable "nginx_registry" { type = string @@ -11,19 +5,20 @@ variable "nginx_registry" { default = "private-registry.nginx.com" } -variable "nginx_jwt" { - type = string - description = "JWT for pulling NGINX image" -} - variable "nginx_pwd" { type = string description = "Password for NGINX (if required)" default = "none" } -# SSH Key (for potential SSH-based configurations) -variable "ssh_key" { +variable "workspace_path" { + description = "The path to 
the workspace directory" + type = string +} + +variable "nginx_jwt" { + description = "The JWT token for NGINX" type = string - description = "Unneeded for NGINX App Protect, only used for TF Cloud variable warnings" + sensitive = true # Mark as sensitive to avoid exposing it in logs } + diff --git a/nap/versions.tf b/nap/versions.tf index a19b134a..cc6970f9 100644 --- a/nap/versions.tf +++ b/nap/versions.tf @@ -1,18 +1,32 @@ terraform { - required_version = ">= 0.14.0" + required_version = ">= 1.6.0" + required_providers { - aws = ">= 4" + aws = { + source = "hashicorp/aws" + version = ">= 4.0.0" + } kubernetes = { - source = "hashicorp/kubernetes" - version = "2.16.1" + source = "hashicorp/kubernetes" + version = ">= 2.23.0" } helm = { source = "hashicorp/helm" - version = ">=2.7.0" + version = ">= 2.12.0" + } + github = { + source = "integrations/github" + version = "6.6.0" } kubectl = { source = "gavinbunney/kubectl" - version = ">= 1.7.0" + version = ">= 1.15.0" + } + docker = { + source = "kreuzwerker/docker" + version = ">= 3.0.2" } + } -} \ No newline at end of file + +} diff --git a/policy/Dockerfile b/policy/Dockerfile new file mode 100755 index 00000000..f39c3be8 --- /dev/null +++ b/policy/Dockerfile @@ -0,0 +1,35 @@ +# syntax=docker/dockerfile:1 +ARG BASE_IMAGE=private-registry.nginx.com/nap/waf-compiler:5.4.0 +FROM ${BASE_IMAGE} + +# Installing packages as root +USER root + +ENV DEBIAN_FRONTEND="noninteractive" + +RUN --mount=type=secret,id=nginx-crt,dst=/etc/ssl/nginx/nginx-repo.crt,mode=0644 \ + --mount=type=secret,id=nginx-key,dst=/etc/ssl/nginx/nginx-repo.key,mode=0644 \ + apt-get update \ + && apt-get install -y \ + apt-transport-https \ + lsb-release \ + ca-certificates \ + wget \ + gnupg2 \ + ubuntu-keyring \ + && wget -qO - https://cs.nginx.com/static/keys/app-protect-security-updates.key | gpg --dearmor | \ + tee /usr/share/keyrings/app-protect-security-updates.gpg >/dev/null \ + && printf "deb 
[signed-by=/usr/share/keyrings/app-protect-security-updates.gpg] \ + https://pkgs.nginx.com/app-protect-security-updates/ubuntu `lsb_release -cs` nginx-plus\n" | \ + tee /etc/apt/sources.list.d/nginx-app-protect.list \ + && wget -P /etc/apt/apt.conf.d https://cs.nginx.com/static/files/90pkgs-nginx \ + && apt-get update \ + && apt-get install -y \ + app-protect-attack-signatures \ + app-protect-bot-signatures \ + app-protect-threat-campaigns \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +# non-root default user (UID 101) +USER nginx diff --git a/policy/backend.tf b/policy/backend.tf new file mode 100644 index 00000000..c496426d --- /dev/null +++ b/policy/backend.tf @@ -0,0 +1,9 @@ +terraform { + backend "s3" { + bucket = "akash-terraform-state-bucket" # Your S3 bucket name + key = "policy/terraform.tfstate" # Path to state file + region = "us-east-1" # AWS region + dynamodb_table = "terraform-lock-table" # DynamoDB table for state locking + encrypt = true # Encrypt state file at rest + } +} \ No newline at end of file diff --git a/policy/data.tf b/policy/data.tf new file mode 100755 index 00000000..c70d87c6 --- /dev/null +++ b/policy/data.tf @@ -0,0 +1,35 @@ +# Read infra state from S3 +data "terraform_remote_state" "infra" { + backend = "s3" + config = { + bucket = "akash-terraform-state-bucket" # Your S3 bucket name + key = "infra/terraform.tfstate" # Path to infra's state file + region = "us-east-1" # AWS region + } +} + +# Read eks state from S3 +data "terraform_remote_state" "eks" { + backend = "s3" + config = { + bucket = "akash-terraform-state-bucket" # Your S3 bucket name + key = "eks-cluster/terraform.tfstate" # Path to EKS state file + region = "us-east-1" # AWS region + } +} + +# Read nap state from S3 +data "terraform_remote_state" "nap" { + backend = "s3" + config = { + bucket = "akash-terraform-state-bucket" # Your S3 bucket name + key = "nap/terraform.tfstate" # Path to NAP state file + region = "us-east-1" # AWS region + } +} + +# Keep 
existing data sources for Kubernetes +data "aws_eks_cluster_auth" "auth" { + name = data.terraform_remote_state.eks.outputs.cluster_name +} + diff --git a/policy/locals.tf b/policy/locals.tf new file mode 100755 index 00000000..709ae8d7 --- /dev/null +++ b/policy/locals.tf @@ -0,0 +1,9 @@ +locals { + project_prefix = data.terraform_remote_state.infra.outputs.project_prefix + build_suffix = data.terraform_remote_state.infra.outputs.build_suffix + aws_region = data.terraform_remote_state.infra.outputs.aws_region + host = data.terraform_remote_state.eks.outputs.cluster_endpoint + cluster_ca_certificate = data.terraform_remote_state.eks.outputs.kubeconfig-certificate-authority-data + cluster_name = data.terraform_remote_state.eks.outputs.cluster_name + app = format("%s-nap-%s", local.project_prefix, local.build_suffix) +} \ No newline at end of file diff --git a/policy/main.tf b/policy/main.tf new file mode 100755 index 00000000..38df203a --- /dev/null +++ b/policy/main.tf @@ -0,0 +1,3 @@ +provider "aws" { + region = local.aws_region +} diff --git a/policy/policy.json b/policy/policy.json new file mode 100755 index 00000000..b0767da5 --- /dev/null +++ b/policy/policy.json @@ -0,0 +1,8 @@ +{ + "policy": { + "name": "policy_name", + "template": { "name": "POLICY_TEMPLATE_NGINX_BASE" }, + "applicationLanguage": "utf-8", + "enforcementMode": "blocking" + } +} diff --git a/policy/versions.tf b/policy/versions.tf new file mode 100755 index 00000000..760e7d43 --- /dev/null +++ b/policy/versions.tf @@ -0,0 +1,18 @@ +terraform { + required_version = ">= 1.6.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.23.0" + } + kubectl = { + source = "gavinbunney/kubectl" + version = ">= 1.15.0" + } + } +} diff --git a/s3/bootstrap.tf b/s3/bootstrap.tf new file mode 100644 index 00000000..58fa9e40 --- /dev/null +++ b/s3/bootstrap.tf @@ -0,0 +1,73 @@ +data "external" 
"s3_bucket_check" { + program = ["bash", "-c", <<EOT + if aws s3api head-bucket --bucket ${var.tf_state_bucket} --region ${var.aws_region} >/dev/null 2>&1; then + printf '{"exists":"true"}' + else + printf '{"exists":"false"}' + fi + EOT + ] +} + +data "external" "dynamodb_table_check" { + program = ["bash", "-c", <<EOT + if aws dynamodb describe-table --table-name terraform-lock-table --region ${var.aws_region} >/dev/null 2>&1; then + printf '{"exists":"true"}' + else + printf '{"exists":"false"}' + fi + EOT + ] +} + +# Create S3 bucket only if missing +resource "aws_s3_bucket" "terraform_state_bucket" { + count = data.external.s3_bucket_check.result.exists == "true" ? 0 : 1 + + bucket = var.tf_state_bucket + force_destroy = true + + tags = { + Name = "Terraform State Bucket" + } +} + +# Configure bucket versioning +resource "aws_s3_bucket_versioning" "state_bucket" { + count = data.external.s3_bucket_check.result.exists == "true" ? 0 : 1 + + bucket = aws_s3_bucket.terraform_state_bucket[0].id + versioning_configuration { + status = "Enabled" + } +} + +# Configure bucket encryption +resource "aws_s3_bucket_server_side_encryption_configuration" "state_bucket" { + count = data.external.s3_bucket_check.result.exists == "true" ? 0 : 1 + + bucket = aws_s3_bucket.terraform_state_bucket[0].id + rule { + apply_server_side_encryption_by_default { + sse_algorithm = "AES256" + } + } +} + +# Create DynamoDB table only if missing +resource "aws_dynamodb_table" "terraform_state_lock" { + count = data.external.dynamodb_table_check.result.exists == "true" ? 
0 : 1 + + name = "terraform-lock-table" + billing_mode = "PAY_PER_REQUEST" + hash_key = "LockID" + + attribute { + name = "LockID" + type = "S" + } + + tags = { + Name = "Terraform State Lock Table" + } +} \ No newline at end of file diff --git a/s3/iam.tf b/s3/iam.tf new file mode 100644 index 00000000..017bedef --- /dev/null +++ b/s3/iam.tf @@ -0,0 +1,54 @@ +data "aws_caller_identity" "current" {} + +# Create IAM role if it doesn't exist +resource "aws_iam_role" "terraform_execution_role" { + count = var.create_iam_resources ? 1 : 0 + + name = "TerraformCIExecutionRole" + description = "Role for basic Terraform CI/CD executions" + max_session_duration = 3600 + + assume_role_policy = jsonencode({ + Version = "2012-10-17", + Statement = [{ + Effect = "Allow", + Principal = { + AWS = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:root" + }, + Action = "sts:AssumeRole" + }] + }) +} + +# Create IAM policy if it doesn't exist +resource "aws_iam_policy" "terraform_state_access" { + count = var.create_iam_resources ? 1 : 0 + + name = "TerraformStateAccess" + description = "Minimum permissions for S3 state management" + + policy = jsonencode({ + Version = "2012-10-17", + Statement = [{ + Effect = "Allow", + Action = [ + "s3:GetObject", + "s3:PutObject", + "s3:DeleteObject", + "s3:ListBucket" + ], + Resource = [ + "arn:aws:s3:::${var.tf_state_bucket}", + "arn:aws:s3:::${var.tf_state_bucket}/*" + ] + }] + }) +} + +# Attach the policy to the IAM role +resource "aws_iam_role_policy_attachment" "state_access" { + count = var.create_iam_resources ? 1 : 0 + + role = aws_iam_role.terraform_execution_role[0].name + policy_arn = aws_iam_policy.terraform_state_access[0].arn +} \ No newline at end of file diff --git a/s3/outputs.tf b/s3/outputs.tf new file mode 100644 index 00000000..0bf83ec2 --- /dev/null +++ b/s3/outputs.tf @@ -0,0 +1,51 @@ +# S3 Bucket Details +output "s3_bucket_created" { + description = "Whether the S3 bucket was created." 
+ value = length(aws_s3_bucket.terraform_state_bucket) > 0 ? true : false +} + +output "s3_bucket_name" { + description = "The name of the S3 bucket." + value = length(aws_s3_bucket.terraform_state_bucket) > 0 ? aws_s3_bucket.terraform_state_bucket[0].bucket : null +} + +# DynamoDB Table Details +output "dynamodb_table_created" { + description = "Whether the DynamoDB table was created." + value = length(aws_dynamodb_table.terraform_state_lock) > 0 ? true : false +} + +output "dynamodb_table_name" { + description = "The name of the DynamoDB table." + value = length(aws_dynamodb_table.terraform_state_lock) > 0 ? aws_dynamodb_table.terraform_state_lock[0].name : null +} + +# Output the ARN of the created IAM role (if created) +output "terraform_execution_role_arn" { + description = "The ARN of the IAM role created for Terraform CI/CD executions." + value = var.create_iam_resources ? aws_iam_role.terraform_execution_role[0].arn : null +} + +# Output the ARN of the created IAM policy (if created) +output "terraform_state_access_policy_arn" { + description = "The ARN of the IAM policy created for Terraform state access." + value = var.create_iam_resources ? aws_iam_policy.terraform_state_access[0].arn : null +} + +# Output the name of the created IAM role (if created) +output "terraform_execution_role_name" { + description = "The name of the IAM role created for Terraform CI/CD executions." + value = var.create_iam_resources ? aws_iam_role.terraform_execution_role[0].name : null +} + +# Output the name of the created IAM policy (if created) +output "terraform_state_access_policy_name" { + description = "The name of the IAM policy created for Terraform state access." + value = var.create_iam_resources ? aws_iam_policy.terraform_state_access[0].name : null +} + +# Output the ID of the AWS account (for reference) +output "aws_account_id" { + description = "The ID of the AWS account where the resources are being created." 
+ value = data.aws_caller_identity.current.account_id
+}
\ No newline at end of file
diff --git a/s3/provider.tf b/s3/provider.tf
new file mode 100644
index 00000000..0996a6ed
--- /dev/null
+++ b/s3/provider.tf
@@ -0,0 +1,5 @@
+# AWS Provider Configuration
+provider "aws" {
+  region = var.aws_region
+}
+
diff --git a/s3/variables.tf b/s3/variables.tf
new file mode 100644
index 00000000..9659d412
--- /dev/null
+++ b/s3/variables.tf
@@ -0,0 +1,16 @@
+variable "tf_state_bucket" {
+  type = string
+  description = "S3 bucket for Terraform state"
+  default = "akash-terraform-state-bucket"
+}
+variable "create_iam_resources" {
+  description = "Whether to create IAM resources (role and policy)."
+  type = bool
+  default = true
+}
+
+variable "aws_region" {
+  description = "AWS region"
+  type = string
+  default = "us-east-1"
+}
diff --git a/s3/versions.tf b/s3/versions.tf
new file mode 100644
index 00000000..fe90661a
--- /dev/null
+++ b/s3/versions.tf
@@ -0,0 +1,15 @@
+terraform {
+  required_version = ">= 1.0.0"
+  required_providers {
+    aws = {
+      source = "hashicorp/aws"
+      version = ">= 5.0"
+    }
+    random = {
+      source = "hashicorp/random"
+      version = ">= 3.5"
+    }
+
+  }
+ }
+
\ No newline at end of file