From d46a8e1f8c45054c0db6f40a113ee599ba153c63 Mon Sep 17 00:00:00 2001 From: "David.Houck" Date: Mon, 8 Apr 2024 21:48:42 -0400 Subject: [PATCH] feat: (IAC-1117) dark site deployment --- viya4-deployment-darksite/README.md | 60 ++++++ .../baseline-to-ecr/00_vars.sh | 10 + .../baseline-to-ecr/01_run_all.sh | 11 + .../baseline-to-ecr/README.md | 13 ++ .../baseline-to-ecr/auto_scaler.sh | 54 +++++ .../baseline-to-ecr/cert_manager.sh | 57 +++++ .../baseline-to-ecr/ebs_driver.sh | 85 ++++++++ .../baseline-to-ecr/ingress_nginx.sh | 62 ++++++ .../baseline-to-ecr/metrics_server.sh | 41 ++++ .../nfs_subdir_external_provisioner.sh | 40 ++++ .../baseline-to-ecr/openldap.sh | 26 +++ .../darksite-openldap-mod/README.md | 6 + .../darksite-openldap-mod.sh | 186 ++++++++++++++++ .../01_iac_deploy.sh | 51 +++++ .../02_dac_deploy.sh | 120 +++++++++++ .../infrastructure/terraform.tfvars | 111 ++++++++++ .../software/ansible-vars-iac.yaml | 200 ++++++++++++++++++ .../software/sitedefault.yaml | 26 +++ .../install-baseline-helm-from-ecr/00_vars.sh | 6 + .../install-baseline-helm-from-ecr/README.md | 37 ++++ .../auto_scaler_install.sh | 44 ++++ .../cert_manager_install.sh | 43 ++++ .../ebs-csi-driver.sh | 57 +++++ .../ingress_nginx_install.sh | 77 +++++++ .../metrics_server_install.sh | 23 ++ .../nfs_provisioner_install.sh | 44 ++++ .../pg_nfs_provisioner_install.sh | 45 ++++ .../mirrormgr-to-ecr/00_vars.sh | 7 + .../mirrormgr-to-ecr/01_mirrormgr-ecr.sh | 35 +++ .../mirrormgr-to-ecr/02_cleanup-ecr.sh | 28 +++ .../mirrormgr-to-ecr/README.md | 23 ++ 31 files changed, 1628 insertions(+) create mode 100644 viya4-deployment-darksite/README.md create mode 100644 viya4-deployment-darksite/baseline-to-ecr/00_vars.sh create mode 100644 viya4-deployment-darksite/baseline-to-ecr/01_run_all.sh create mode 100644 viya4-deployment-darksite/baseline-to-ecr/README.md create mode 100644 viya4-deployment-darksite/baseline-to-ecr/auto_scaler.sh create mode 100644 viya4-deployment-darksite/baseline-to-ecr/cert_manager.sh create mode 100644 viya4-deployment-darksite/baseline-to-ecr/ebs_driver.sh create mode 100644 viya4-deployment-darksite/baseline-to-ecr/ingress_nginx.sh create mode 100644 viya4-deployment-darksite/baseline-to-ecr/metrics_server.sh create mode 100644 viya4-deployment-darksite/baseline-to-ecr/nfs_subdir_external_provisioner.sh create mode 100644 viya4-deployment-darksite/baseline-to-ecr/openldap.sh create mode 100644 viya4-deployment-darksite/darksite-openldap-mod/README.md create mode 100644 viya4-deployment-darksite/darksite-openldap-mod/darksite-openldap-mod.sh create mode 100755 viya4-deployment-darksite/deployment-machine-assets/01_iac_deploy.sh create mode 100755 viya4-deployment-darksite/deployment-machine-assets/02_dac_deploy.sh create mode 100755 viya4-deployment-darksite/deployment-machine-assets/infrastructure/terraform.tfvars create mode 100755 viya4-deployment-darksite/deployment-machine-assets/software/ansible-vars-iac.yaml create mode 100755 viya4-deployment-darksite/deployment-machine-assets/software/sitedefault.yaml create mode 100644 viya4-deployment-darksite/install-baseline-helm-from-ecr/00_vars.sh create mode 100644 viya4-deployment-darksite/install-baseline-helm-from-ecr/README.md create mode 100644 viya4-deployment-darksite/install-baseline-helm-from-ecr/auto_scaler_install.sh create mode 100644 viya4-deployment-darksite/install-baseline-helm-from-ecr/cert_manager_install.sh create mode 100644 viya4-deployment-darksite/install-baseline-helm-from-ecr/ebs-csi-driver.sh create mode 100644 
viya4-deployment-darksite/install-baseline-helm-from-ecr/ingress_nginx_install.sh create mode 100644 viya4-deployment-darksite/install-baseline-helm-from-ecr/metrics_server_install.sh create mode 100644 viya4-deployment-darksite/install-baseline-helm-from-ecr/nfs_provisioner_install.sh create mode 100644 viya4-deployment-darksite/install-baseline-helm-from-ecr/pg_nfs_provisioner_install.sh create mode 100644 viya4-deployment-darksite/mirrormgr-to-ecr/00_vars.sh create mode 100644 viya4-deployment-darksite/mirrormgr-to-ecr/01_mirrormgr-ecr.sh create mode 100644 viya4-deployment-darksite/mirrormgr-to-ecr/02_cleanup-ecr.sh create mode 100644 viya4-deployment-darksite/mirrormgr-to-ecr/README.md
diff --git a/viya4-deployment-darksite/README.md b/viya4-deployment-darksite/README.md
new file mode 100644
index 00000000..67e415c8
--- /dev/null
+++ b/viya4-deployment-darksite/README.md
@@ -0,0 +1,60 @@
+# Deploy to AWS EKS in a Dark Site or Air-Gapped Site Scenario
+
+This file describes procedures, helper scripts, and example files. First decide on your deployment scenario:
+
+1. The deployment virtual machine has Internet access but the EKS cluster cannot reach the Internet (dark site) - follow procedures 1, 2, 4, and 6.
+2. Neither the deployment virtual machine nor the cluster has Internet access (air-gapped site) - follow procedures 1, 2, 5, and 6. Note: you still need to push all of the images and Helm charts to ECR from a machine that has Internet access. During the install, the deployment machine pulls them through the private ECR endpoint in the VPC, so it does not need Internet access.
+
+**Notes:**
+- The following procedures assume that the `viya4-iac-aws` project was used to deploy the EKS infrastructure. Refer to the `viya4-iac-aws-darksite` folder within the `viya4-iac-aws` [github repo](https://github.com/sassoftware/viya4-iac-aws) for the procedures pertaining to IaC use with an AWS dark site configuration.
+- Helper shell scripts under the `viya4-deployment-darksite` folder in this project assume that the deployment virtual machine is properly configured; confirm that:
+  - the kubeconfig file for the EKS cluster has been installed and tested (EKS cluster admin access is verified as working)
+  - the AWS CLI is configured
+
+# Procedures
+
+1. **Push Viya4 images to ECR (uses SAS mirrormgr tool):**
+   - Download deployment assets from my.sas.com
+   - Refer to the `mirrormgr-to-ecr` folder in this repo for helper scripts
+
+2. **Push 3rd party images to ECR:**
+   - Refer to the `baseline-to-ecr` folder in this repo for helper scripts
+   - Note: OpenLDAP is only required if you are planning to use OpenLDAP for your deployment. A script to automate this is located [here](https://github.com/sassoftware/viya4-deployment/blob/main/viya4-deployment-darksite/baseline-to-ecr/openldap.sh).
+
+3. **(Optional) If OpenLDAP is needed, modify the local viya4-deployment clone**
+   - Refer to the [darksite-openldap-mod](https://github.com/sassoftware/viya4-deployment/blob/main/viya4-deployment-darksite/darksite-openldap-mod) folder for procedures. You can build the container using the script or do it manually.
+
+4. **Deployment machine has Internet access - use viya4-deployment for baseline,install**
+
+   1. Use built-in variables for baseline configurations in your `ansible-vars.yaml` file:
+      - An example `ansible-vars.yaml` is provided [here](https://github.com/sassoftware/viya4-deployment/blob/main/viya4-deployment-darksite/deployment-machine-assets/software/ansible-vars-iac.yaml)
+      - The goal is to change the image references to point to ECR instead of an Internet-facing repository and to add cluster subnet ID annotations for the nginx load balancers:
+        - Replace `{{ AWS_ACCT_ID }}` with your AWS account ID
+        - Replace `{{ AWS_REGION }}` with your AWS region
+        - Replace `{{ CONTROLLER_ECR_IMAGE_DIGEST }}` with the image digest from ECR
+        - Replace `{{ WEBHOOK_ECR_IMAGE_DIGEST }}` with the image digest from ECR
+      - If your VPC contains multiple subnets (unrelated to Viya), you may need to add annotations to force the NLB to associate with the Viya subnets. More on that topic [here](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/deploy/subnet_discovery/).
+
+   2. Deploy viya4-deployment baseline,install. Note: the deployment virtual machine pulls the Helm charts from the Internet during this step.
+
+5. **Deployment machine has no Internet access - install baseline using Helm charts pulled from ECR**
+   - Two options:
+     1. If you are using an OCI-type repository (like ECR), you can use `viya4-deployment`, but some changes to the baseline items in `ansible-vars.yaml` are required. The example provided [here](https://github.com/sassoftware/viya4-deployment/blob/main/viya4-deployment-darksite/deployment-machine-assets/software/ansible-vars-iac.yaml) includes the needed variables for OCI Helm support. Pay close attention to the `XXX_CHART_URL` and `XXX_CHART_NAME` variables.
+     2. Use Helm directly to "manually" install the baseline items.
+        - Refer to the `install-baseline-helm-from-ecr` README.md for instructions.
+
+6. **viya4-deployment viya,install**
+   - **Note:** As of `viya4-deployment` v6.0.0, the project uses the Deployment Operator by default. The Deployment Operator has additional considerations in a dark site deployment because the repository warehouse for the metadata will not be available without Internet access (it is pulled from ses.sas.com).
+
+   - There are multiple options to mitigate the issue created by using the Deployment Operator:
+
+     1. (Easiest/Recommended) Set `V4_DEPLOYMENT_OPERATOR_ENABLED` to false. This uses the sas-orchestration method for deployment instead of the Deployment Operator (no offline repository-warehouse hosting is required).
+
+     2. Supply the repository information through an internally deployed HTTP server. SAS doesn't provide instructions for this because there are many ways to accomplish it; one approach is shared in this [TS Track](https://sirius.na.sas.com/Sirius/GSTS/ShowTrack.aspx?trknum=7613552746).
+
+     3. Store the required metadata on a file system that can be mounted to the reconciler pod (using a transformer). See the [TIES Blog for instructions](http://sww.sas.com/blogs/wp/technical-insights/8466/configuring-a-repository-warehouse-for-a-sas-viya-platform-deployment-at-a-dark-site/sukhda/2023/02/28).
+
+     4. Use DAC with `DEPLOY: false` set. This builds the manifests and the references in kustomization.yaml and stops there. Then proceed with the manual installation steps: create site.yaml and apply it to the cluster (just ensure you are using the proper kustomize version!).
+
+   - **Important:** ensure you specify `V4_CFG_CR_URL` in your ansible-vars. This should be your ECR URL plus your Viya namespace!
+     Example: using "viya4" as the Viya namespace, this would be `XXXXX.dkr.ecr.{{AWS_REGION}}.amazonaws.com/viya4`.
diff --git a/viya4-deployment-darksite/baseline-to-ecr/00_vars.sh b/viya4-deployment-darksite/baseline-to-ecr/00_vars.sh
new file mode 100644
index 00000000..6e4fe534
--- /dev/null
+++ b/viya4-deployment-darksite/baseline-to-ecr/00_vars.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+## set variables
+AWS_ACCT_ID=
+AWS_REGION=
+
+K8S_minor_version=25 # K8s minor version: v1.22.x would be 22, v1.21.x would be 21. This must match your deployment!
+DEPLOYMENT_VERSION=main # main pulls the latest release of viya4-deployment, but this can be set to a specific version if needed, e.g., 5.2.0
+
+DOCKER_SUDO= # put sudo here if you require sudo for docker commands... else leave blank
\ No newline at end of file
diff --git a/viya4-deployment-darksite/baseline-to-ecr/01_run_all.sh b/viya4-deployment-darksite/baseline-to-ecr/01_run_all.sh
new file mode 100644
index 00000000..62a9c736
--- /dev/null
+++ b/viya4-deployment-darksite/baseline-to-ecr/01_run_all.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+source 00_vars.sh
+
+. auto_scaler.sh
+. cert_manager.sh
+. ingress_nginx.sh
+. metrics_server.sh
+. nfs_subdir_external_provisioner.sh
+. openldap.sh
+. ebs_driver.sh
\ No newline at end of file
diff --git a/viya4-deployment-darksite/baseline-to-ecr/README.md b/viya4-deployment-darksite/baseline-to-ecr/README.md
new file mode 100644
index 00000000..c3dc4f20
--- /dev/null
+++ b/viya4-deployment-darksite/baseline-to-ecr/README.md
@@ -0,0 +1,13 @@
+These scripts assume your AWS CLI and your kubeconfig are already configured!
+
+Notes:
+- Requires helm, yq, and the AWS CLI.
+- These scripts push the Helm charts and the corresponding container images to ECR for each baseline item.
+- The chart version is set automatically based on the viya4-deployment (DAC) version you specify (see the sketch below).
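A minimal sketch of how that chart-version lookup works, mirroring what these helper scripts do internally (cert-manager shown here); it assumes Internet access plus the `curl`, `yq`, and `helm` CLIs and the variables from `00_vars.sh`:

```bash
#!/bin/bash
source 00_vars.sh   # provides DEPLOYMENT_VERSION (and the AWS settings used later)

# Read the chart version pinned by the chosen viya4-deployment (DAC) release
# from that release's baseline defaults file on GitHub.
DEFAULTS_URL="https://raw.githubusercontent.com/sassoftware/viya4-deployment/${DEPLOYMENT_VERSION}/roles/baseline/defaults/main.yml"
CHART_VERSION=$(curl -s "$DEFAULTS_URL" | yq '.CERT_MANAGER_CHART_VERSION')
echo "cert-manager chart version pinned by DAC ${DEPLOYMENT_VERSION}: ${CHART_VERSION}"

# Pull exactly that chart version locally before pushing it on to ECR.
helm repo add jetstack https://charts.jetstack.io/
helm repo update
helm pull jetstack/cert-manager --version="$CHART_VERSION"
```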
+ +## Step 1: Set your variables +- Set your variables in 00_vars.sh + +## Step 2: Run script(s) +- Option 1: run 01_run_all.sh (runs all scripts) +- Option 2: run scripts individually \ No newline at end of file diff --git a/viya4-deployment-darksite/baseline-to-ecr/auto_scaler.sh b/viya4-deployment-darksite/baseline-to-ecr/auto_scaler.sh new file mode 100644 index 00000000..272df9c7 --- /dev/null +++ b/viya4-deployment-darksite/baseline-to-ecr/auto_scaler.sh @@ -0,0 +1,54 @@ +#!/bin/bash + +source 00_vars.sh + +# account for v6.3.0+ changes - autoscaler now supports k8s 1.25 +DV=$(echo $DEPLOYMENT_VERSION | sed 's/\.//g') +if [ $DEPLOYMENT_VERSION == "main" ] && [ $K8S_minor_version -ge 25 ]; then + CHART_VERSION=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml | yq '.autoscalerVersions.PDBv1Support.api.chartVersion') +elif [ $DEPLOYMENT_VERSION == "main" ] && [ $K8S_minor_version -le 24 ]; then + CHART_VERSION=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml | yq '.autoscalerVersions.PDBv1beta1Support.api.chartVersion') +elif [ $DV -ge 630 ] && [ $K8S_minor_version -ge 25 ]; then + CHART_VERSION=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml | yq '.autoscalerVersions.PDBv1Support.api.chartVersion') +elif [ $DV -ge 630 ] && [ $K8S_minor_version -le 24 ]; then + CHART_VERSION=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml | yq '.autoscalerVersions.PDBv1beta1Support.api.chartVersion') +elif [ $DV -le 620 ] ; then + CHART_VERSION=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml | yq '.CLUSTER_AUTOSCALER_CHART_VERSION') +fi + +## get chart version from viya4-deployment repo +echo "**** cluster-autoscaler ****" +echo "Helm chart version: $CHART_VERSION" +## Get helm chart info +helm repo add autoscaling https://kubernetes.github.io/autoscaler +helm repo update +IMG_REPO=$(helm show values autoscaling/cluster-autoscaler --version=$CHART_VERSION | yq '.image.repository') +TAG=$(helm show values autoscaling/cluster-autoscaler --version=$CHART_VERSION | yq '.image.tag') +echo "Image repo: $IMG_REPO" && echo "Image tag: $TAG" +echo "*********************" + +## pull the image +$DOCKER_SUDO docker pull $IMG_REPO:$TAG + + +# create ECR repo +aws ecr create-repository --no-cli-pager --repository-name cluster-autoscaler + +# push the helm chart to the ECR repo +helm pull autoscaling/cluster-autoscaler --version=$CHART_VERSION +aws ecr get-login-password \ + --region $AWS_REGION | helm registry login \ + --username AWS \ + --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com +helm push cluster-autoscaler-$CHART_VERSION.tgz oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/ +rm cluster-autoscaler-$CHART_VERSION.tgz + +# ## update local image tag appropriately +$DOCKER_SUDO docker tag $IMG_REPO:$TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/cluster-autoscaler:$TAG + + +# # ## auth local docker to ecr +aws ecr get-login-password --region $AWS_REGION | $DOCKER_SUDO docker login --username AWS --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com + +# # ## puch local image to ecr +$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/cluster-autoscaler:$TAG \ No newline at end of 
file diff --git a/viya4-deployment-darksite/baseline-to-ecr/cert_manager.sh b/viya4-deployment-darksite/baseline-to-ecr/cert_manager.sh new file mode 100644 index 00000000..57f34626 --- /dev/null +++ b/viya4-deployment-darksite/baseline-to-ecr/cert_manager.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +source 00_vars.sh + + +## get chart version from viya4-deployment repo +echo "**** cert-manager ****" +CHART_VERSION=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml | yq '.CERT_MANAGER_CHART_VERSION') +echo "Helm chart version: $CHART_VERSION" +## Get helm chart info +helm repo add jetstack https://charts.jetstack.io/ +helm repo update +IMG_CONTROLLER=$(helm show values jetstack/cert-manager --version=$CHART_VERSION | yq '.image.repository') +IMG_WEBHOOK=$(helm show values jetstack/cert-manager --version=$CHART_VERSION | yq '.webhook.image.repository') +IMG_CAINJECTOR=$(helm show values jetstack/cert-manager --version=$CHART_VERSION | yq '.cainjector.image.repository') +IMG_STARTUP=$(helm show values jetstack/cert-manager --version=$CHART_VERSION | yq '.startupapicheck.image.repository') +echo "controller repo: $IMG_CONTROLLER" && echo "webhook repo: $IMG_WEBHOOK" && echo "cainject repo: $IMG_CAINJECTOR" && echo "startupapicheck repo: $IMG_STARTUP" +echo "*********************" + + +## pull the images +$DOCKER_SUDO docker pull $IMG_CONTROLLER:v$CHART_VERSION +$DOCKER_SUDO docker pull $IMG_WEBHOOK:v$CHART_VERSION +$DOCKER_SUDO docker pull $IMG_CAINJECTOR:v$CHART_VERSION +$DOCKER_SUDO docker pull $IMG_STARTUP:v$CHART_VERSION + + +# create ECR repos +aws ecr create-repository --no-cli-pager --repository-name cert-manager # this repo is used to store the helm chart +aws ecr create-repository --no-cli-pager --repository-name $IMG_CONTROLLER +aws ecr create-repository --no-cli-pager --repository-name $IMG_WEBHOOK +aws ecr create-repository --no-cli-pager --repository-name $IMG_CAINJECTOR +aws ecr create-repository --no-cli-pager --repository-name $IMG_STARTUP + +# push the helm charts to the ECR repo +helm pull jetstack/cert-manager --version=$CHART_VERSION +aws ecr get-login-password \ + --region $AWS_REGION | helm registry login \ + --username AWS \ + --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com +helm push cert-manager-v$CHART_VERSION.tgz oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/ +rm cert-manager-v$CHART_VERSION.tgz + +# ## update local images tags appropriately +$DOCKER_SUDO docker tag $IMG_CONTROLLER:v$CHART_VERSION $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_CONTROLLER:v$CHART_VERSION +$DOCKER_SUDO docker tag $IMG_WEBHOOK:v$CHART_VERSION $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_WEBHOOK:v$CHART_VERSION +$DOCKER_SUDO docker tag $IMG_CAINJECTOR:v$CHART_VERSION $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_CAINJECTOR:v$CHART_VERSION +$DOCKER_SUDO docker tag $IMG_STARTUP:v$CHART_VERSION $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_STARTUP:v$CHART_VERSION + +# # ## auth local $DOCKER_SUDO docker to ecr +aws ecr get-login-password --region $AWS_REGION | $DOCKER_SUDO docker login --username AWS --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com + +# # ## puch local images to ecr +$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_CONTROLLER:v$CHART_VERSION +$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_WEBHOOK:v$CHART_VERSION +$DOCKER_SUDO docker push 
$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_CAINJECTOR:v$CHART_VERSION +$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_STARTUP:v$CHART_VERSION \ No newline at end of file diff --git a/viya4-deployment-darksite/baseline-to-ecr/ebs_driver.sh b/viya4-deployment-darksite/baseline-to-ecr/ebs_driver.sh new file mode 100644 index 00000000..58223a74 --- /dev/null +++ b/viya4-deployment-darksite/baseline-to-ecr/ebs_driver.sh @@ -0,0 +1,85 @@ +#!/bin/bash + +source 00_vars.sh + +## get chart version from viya4-deployment repo +echo -e "\n**** aws-ebs-csi-driver ****" +CHART_VERSION=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml | yq '.EBS_CSI_DRIVER_CHART_VERSION') +echo "Helm chart version: $CHART_VERSION" +## Get helm chart info +helm repo add aws-ebs-csi-driver https://kubernetes-sigs.github.io/aws-ebs-csi-driver +helm repo update +HELM_CHART=$(helm show all aws-ebs-csi-driver/aws-ebs-csi-driver --version=$CHART_VERSION) +# echo "$HELM_CHART" +IMG_REPO=$(echo "$HELM_CHART" | yq -N '.image.repository | select(. != null)') +IMG_TAG=$(echo "$HELM_CHART" | yq -N '.appVersion | select(. != null)') +PROVISIONER_REPO=$(echo "$HELM_CHART" | yq -N '.sidecars.provisioner.image.repository | select(. != null)') +PROVISIONER_TAG=$(echo "$HELM_CHART" | yq -N '.sidecars.provisioner.image.tag | select(. != null)') +ATTACHER_REPO=$(echo "$HELM_CHART" | yq -N '.sidecars.attacher.image.repository | select(. != null)') +ATTACHER_TAG=$(echo "$HELM_CHART" | yq -N '.sidecars.attacher.image.tag | select(. != null)') +SNAPSHOTTER_REPO=$(echo "$HELM_CHART" | yq -N '.sidecars.snapshotter.image.repository | select(. != null)') +SNAPSHOTTER_TAG=$(echo "$HELM_CHART" | yq -N '.sidecars.snapshotter.image.tag | select(. != null)') +LIVENESS_REPO=$(echo "$HELM_CHART" | yq -N '.sidecars.livenessProbe.image.repository | select(. != null)') +LIVENESS_TAG=$(echo "$HELM_CHART" | yq -N '.sidecars.livenessProbe.image.tag | select(. != null)') +RESIZER_REPO=$(echo "$HELM_CHART" | yq -N '.sidecars.resizer.image.repository | select(. != null)') +RESIZER_TAG=$(echo "$HELM_CHART" | yq -N '.sidecars.resizer.image.tag | select(. != null)') +NODEREG_REPO=$(echo "$HELM_CHART" | yq -N '.sidecars.nodeDriverRegistrar.image.repository | select(. != null)') +NODEREG_TAG=$(echo "$HELM_CHART" | yq -N '.sidecars.nodeDriverRegistrar.image.tag | select(. 
!= null)') +echo "Driver image repo: $IMG_REPO" && echo "Image tag: v$IMG_TAG" +echo "Provisioning image repo: $PROVISIONER_REPO" && echo "Image tag: $PROVISIONER_TAG" +echo "Attacher image repo: $ATTACHER_REPO" && echo "Image tag: $ATTACHER_TAG" +echo "Snapshotter image repo: $SNAPSHOTTER_REPO" && echo "Image tag: $SNAPSHOTTER_TAG" +echo "Liveness image repo: $LIVENESS_REPO" && echo "Image tag: $LIVENESS_TAG" +echo "Resizer image repo: $RESIZER_REPO" && echo "Image tag: $RESIZER_TAG" +echo "NodeDriverRegister image repo: $NODEREG_REP" && echo "Image tag: $NODEREG_TAG" +echo "*********************" + +## pull the image +$DOCKER_SUDO docker pull $IMG_REPO:v$IMG_TAG +$DOCKER_SUDO docker pull $PROVISIONER_REPO:$PROVISIONER_TAG +$DOCKER_SUDO docker pull $ATTACHER_REPO:$ATTACHER_TAG +$DOCKER_SUDO docker pull $SNAPSHOTTER_REPO:$SNAPSHOTTER_TAG +$DOCKER_SUDO docker pull $LIVENESS_REPO:$LIVENESS_TAG +$DOCKER_SUDO docker pull $RESIZER_REPO:$RESIZER_TAG +$DOCKER_SUDO docker pull $NODEREG_REPO:$NODEREG_TAG + +# create ECR repo +aws ecr create-repository --no-cli-pager --repository-name aws-ebs-csi-driver # this is to house to helm chart +aws ecr create-repository --no-cli-pager --repository-name $IMG_REPO +aws ecr create-repository --no-cli-pager --repository-name $PROVISIONER_REPO +aws ecr create-repository --no-cli-pager --repository-name $ATTACHER_REPO +aws ecr create-repository --no-cli-pager --repository-name $SNAPSHOTTER_REPO +aws ecr create-repository --no-cli-pager --repository-name $LIVENESS_REPO +aws ecr create-repository --no-cli-pager --repository-name $RESIZER_REPO +aws ecr create-repository --no-cli-pager --repository-name $NODEREG_REPO + +# push the helm chart to the ECR repo +helm pull aws-ebs-csi-driver/aws-ebs-csi-driver --version=$CHART_VERSION +aws ecr get-login-password \ + --no-cli-pager \ + --region $AWS_REGION | helm registry login \ + --username AWS \ + --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com +helm push aws-ebs-csi-driver-$CHART_VERSION.tgz oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/ +rm aws-ebs-csi-driver-$CHART_VERSION.tgz + +# update local image tag appropriately +$DOCKER_SUDO docker tag $IMG_REPO:v$IMG_TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_REPO:v$IMG_TAG +$DOCKER_SUDO docker tag $PROVISIONER_REPO:$PROVISIONER_TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$PROVISIONER_REPO:$PROVISIONER_TAG +$DOCKER_SUDO docker tag $ATTACHER_REPO:$ATTACHER_TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$ATTACHER_REPO:$ATTACHER_TAG +$DOCKER_SUDO docker tag $SNAPSHOTTER_REPO:$SNAPSHOTTER_TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$SNAPSHOTTER_REPO:$SNAPSHOTTER_TAG +$DOCKER_SUDO docker tag $LIVENESS_REPO:$LIVENESS_TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$LIVENESS_REPO:$LIVENESS_TAG +$DOCKER_SUDO docker tag $RESIZER_REPO:$RESIZER_TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$RESIZER_REPO:$RESIZER_TAG +$DOCKER_SUDO docker tag $NODEREG_REPO:$NODEREG_TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$NODEREG_REPO:$NODEREG_TAG + +# auth local docker to ecr +aws ecr get-login-password --region $AWS_REGION | $DOCKER_SUDO docker login --username AWS --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com + +# puch local image to ecr +$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_REPO:v$IMG_TAG +$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$PROVISIONER_REPO:$PROVISIONER_TAG +$DOCKER_SUDO docker push 
$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$ATTACHER_REPO:$ATTACHER_TAG +$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$SNAPSHOTTER_REPO:$SNAPSHOTTER_TAG +$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$LIVENESS_REPO:$LIVENESS_TAG +$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$RESIZER_REPO:$RESIZER_TAG +$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$NODEREG_REPO:$NODEREG_TAG diff --git a/viya4-deployment-darksite/baseline-to-ecr/ingress_nginx.sh b/viya4-deployment-darksite/baseline-to-ecr/ingress_nginx.sh new file mode 100644 index 00000000..4e578c17 --- /dev/null +++ b/viya4-deployment-darksite/baseline-to-ecr/ingress_nginx.sh @@ -0,0 +1,62 @@ +#!/bin/bash + +source 00_vars.sh + +# determine chart version to use +V_CEILING=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml | yq '.ingressVersions.k8sMinorVersionCeiling.value') +V_FLOOR=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml | yq '.ingressVersions.k8sMinorVersionFloor.value') + +if [ $K8S_minor_version -ge $V_FLOOR ]; then + CHART_VERSION=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml | yq '.ingressVersions.k8sMinorVersionFloor.api.chartVersion') + echo "Helm chart version: $CHART_VERSION" +elif [ $K8S_minor_version -le $V_CEILING ]; then + CHART_VERSION=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml | yq '.ingressVersions.k8sMinorVersionCeiling.api.chartVersion') + echo "Helm chart version: $CHART_VERSION" +else + echo "Error with your minor version! Exiting..." 
+ exit 1 +fi + +## Get helm chart info +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm repo update +CONTROLLER_REGISTRY=$(helm show values ingress-nginx/ingress-nginx --version=$CHART_VERSION | yq '.controller.image.registry') +CONTROLLER_IMAGE=$(helm show values ingress-nginx/ingress-nginx --version=$CHART_VERSION | yq '.controller.image.image') +CONTROLLER_TAG=$(helm show values ingress-nginx/ingress-nginx --version=$CHART_VERSION | yq '.controller.image.tag') +WEBHOOKS_REGISTRY=$(helm show values ingress-nginx/ingress-nginx --version=$CHART_VERSION | yq '.controller.admissionWebhooks.patch.image.registry') +WEBHOOKS_TAG=$(helm show values ingress-nginx/ingress-nginx --version=$CHART_VERSION | yq '.controller.admissionWebhooks.patch.image.tag') +WEBHOOKS_IMAGE=$(helm show values ingress-nginx/ingress-nginx --version=$CHART_VERSION | yq '.controller.admissionWebhooks.patch.image.image') +echo "controller repo: $CONTROLLER_REGISTRY/$CONTROLLER_IMAGE:$CONTROLLER_TAG" && echo "webhook repo: $WEBHOOKS_REGISTRY/$WEBHOOKS_IMAGE:$WEBHOOKS_TAG" +echo "*********************" + + +## pull the image +$DOCKER_SUDO docker pull $CONTROLLER_REGISTRY/$CONTROLLER_IMAGE:$CONTROLLER_TAG +$DOCKER_SUDO docker pull $WEBHOOKS_REGISTRY/$WEBHOOKS_IMAGE:$WEBHOOKS_TAG + + +# create ECR repo +aws ecr create-repository --no-cli-pager --repository-name ingress-nginx # this repo is used to store the helm chart +aws ecr create-repository --no-cli-pager --repository-name $CONTROLLER_IMAGE +aws ecr create-repository --no-cli-pager --repository-name $WEBHOOKS_IMAGE + +# push the helm charts to the ECR repo +helm pull ingress-nginx/ingress-nginx --version=$CHART_VERSION +aws ecr get-login-password \ + --region $AWS_REGION | helm registry login \ + --username AWS \ + --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com +helm push ingress-nginx-$CHART_VERSION.tgz oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/ +rm ingress-nginx-$CHART_VERSION.tgz + + +# ## update local image tag appropriately +$DOCKER_SUDO docker tag $CONTROLLER_REGISTRY/$CONTROLLER_IMAGE:$CONTROLLER_TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$CONTROLLER_IMAGE:$CONTROLLER_TAG +$DOCKER_SUDO docker tag $WEBHOOKS_REGISTRY/$WEBHOOKS_IMAGE:$WEBHOOKS_TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$WEBHOOKS_IMAGE:$WEBHOOKS_TAG + +# # ## auth local $DOCKER_SUDO docker to ecr +aws ecr get-login-password --region $AWS_REGION | $DOCKER_SUDO docker login --username AWS --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com + +# # ## puch local image to ecr +$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$CONTROLLER_IMAGE:$CONTROLLER_TAG +$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$WEBHOOKS_IMAGE:$WEBHOOKS_TAG \ No newline at end of file diff --git a/viya4-deployment-darksite/baseline-to-ecr/metrics_server.sh b/viya4-deployment-darksite/baseline-to-ecr/metrics_server.sh new file mode 100644 index 00000000..abeb0262 --- /dev/null +++ b/viya4-deployment-darksite/baseline-to-ecr/metrics_server.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +source 00_vars.sh + +echo "**** metrics-server ****" +CHART_VERSION=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml | yq '.METRICS_SERVER_CHART_VERSION') +echo "Helm chart version: $CHART_VERSION" +helm repo add bitnami https://charts.bitnami.com/bitnami +helm repo update +REGISTRY=$(helm show values bitnami/metrics-server --version=$CHART_VERSION | yq 
'.image.registry') +TAG=$(helm show values bitnami/metrics-server --version=$CHART_VERSION | yq '.image.tag') +IMAGE=$(helm show values bitnami/metrics-server --version=$CHART_VERSION | yq '.image.repository') +echo "Image repo: $REGISTRY/$IMAGE:$TAG" +echo "*********************" + +## pull the image +$DOCKER_SUDO docker pull $REGISTRY/$IMAGE:$TAG + + +# create ECR repo +aws ecr create-repository --no-cli-pager --repository-name metrics-server + +# push the helm chart to the ECR repo +helm pull bitnami/metrics-server --version=$CHART_VERSION +aws ecr get-login-password \ + --region $AWS_REGION | helm registry login \ + --username AWS \ + --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com +helm push metrics-server-$CHART_VERSION.tgz oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/ +rm metrics-server-$CHART_VERSION.tgz + + +# ## update local image tag appropriately +$DOCKER_SUDO docker tag $REGISTRY/$IMAGE:$TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/metrics-server:$TAG + + +# # ## auth local $DOCKER_SUDO docker to ecr +aws ecr get-login-password --region $AWS_REGION | $DOCKER_SUDO docker login --username AWS --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com + +# # ## puch local image to ecr +$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/metrics-server:$TAG \ No newline at end of file diff --git a/viya4-deployment-darksite/baseline-to-ecr/nfs_subdir_external_provisioner.sh b/viya4-deployment-darksite/baseline-to-ecr/nfs_subdir_external_provisioner.sh new file mode 100644 index 00000000..7d3408f8 --- /dev/null +++ b/viya4-deployment-darksite/baseline-to-ecr/nfs_subdir_external_provisioner.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +source 00_vars.sh + +echo "**** nfs-subdir-external-provisioner ****" +CHART_VERSION=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml | yq '.NFS_CLIENT_CHART_VERSION') +echo "Helm chart version: $CHART_VERSION" +helm repo add nfs-subdir-external-provisioner https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/ +helm repo update +REPOSITORY=$(helm show values nfs-subdir-external-provisioner/nfs-subdir-external-provisioner --version=$CHART_VERSION | yq '.image.repository') +TAG=$(helm show values nfs-subdir-external-provisioner/nfs-subdir-external-provisioner --version=$CHART_VERSION | yq '.image.tag') +echo "Image repo: $REPOSITORY:$TAG" +echo "*****************************************" + +## pull the image +$DOCKER_SUDO docker pull $REPOSITORY:$TAG + + +# create ECR repo +aws ecr create-repository --no-cli-pager --repository-name nfs-subdir-external-provisioner + +# push the helm chart to the ECR repo +helm pull nfs-subdir-external-provisioner/nfs-subdir-external-provisioner --version=$CHART_VERSION +aws ecr get-login-password \ + --region $AWS_REGION | helm registry login \ + --username AWS \ + --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com +helm push nfs-subdir-external-provisioner-$CHART_VERSION.tgz oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/ +rm nfs-subdir-external-provisioner-$CHART_VERSION.tgz + + +# ## update local image tag appropriately +$DOCKER_SUDO docker tag $REPOSITORY:$TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/nfs-subdir-external-provisioner:$TAG + + +# # ## auth local $DOCKER_SUDO docker to ecr +aws ecr get-login-password --region $AWS_REGION | $DOCKER_SUDO docker login --username AWS --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com + +# # ## puch local 
image to ecr +$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/nfs-subdir-external-provisioner:$TAG \ No newline at end of file diff --git a/viya4-deployment-darksite/baseline-to-ecr/openldap.sh b/viya4-deployment-darksite/baseline-to-ecr/openldap.sh new file mode 100644 index 00000000..88a86c2a --- /dev/null +++ b/viya4-deployment-darksite/baseline-to-ecr/openldap.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +source 00_vars.sh + +echo "**** openldap ****" +IMAGE=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/vdm/templates/resources/openldap.yaml | yq -N '.spec.template.spec.containers[0].image | select(. != null)') +echo "Image: $IMAGE" +echo "******************" + +## pull the image +$DOCKER_SUDO docker pull $IMAGE + + +# create ECR repo +aws ecr create-repository --no-cli-pager --repository-name osixia/openldap + + +# ## update local image tag appropriately +$DOCKER_SUDO docker tag $IMAGE $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMAGE + + +# # ## auth local docker to ecr +aws ecr get-login-password --region $AWS_REGION | docker login --username AWS --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com + +# # ## puch local image to ecr +$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMAGE \ No newline at end of file diff --git a/viya4-deployment-darksite/darksite-openldap-mod/README.md b/viya4-deployment-darksite/darksite-openldap-mod/README.md new file mode 100644 index 00000000..8912dab2 --- /dev/null +++ b/viya4-deployment-darksite/darksite-openldap-mod/README.md @@ -0,0 +1,6 @@ + +## Mod roles/vdm/templates/resources/openldap.yaml + +- Only required if using an internal OpenLDAP server. By default, the cluster will reach out to docker hub to pull this image, and in a darksite this isn't possible. +- Run the darksite-openldap-mod.sh script. +- Build the modded container using the script or manually if you'd like. \ No newline at end of file diff --git a/viya4-deployment-darksite/darksite-openldap-mod/darksite-openldap-mod.sh b/viya4-deployment-darksite/darksite-openldap-mod/darksite-openldap-mod.sh new file mode 100644 index 00000000..f10c9de2 --- /dev/null +++ b/viya4-deployment-darksite/darksite-openldap-mod/darksite-openldap-mod.sh @@ -0,0 +1,186 @@ +#!/bin/bash + +# helper script to easily mod viya4-deployment when using openldap in a darksite + + +## check that viya4-deployment/ exists in this folder +if [ ! -d "viya4-deployment/" ] +then + echo -e "\nError: Directory viya4-deployment/ does not exists!\n" + read -p "Would you like to locally clone the viya4-deployment github repo to fix (y/n)? " -n 1 -r REPLY + echo # (optional) move to a new line + if [[ ! $REPLY =~ ^[Yy]$ ]]; then + exit 1 + fi + ## Get desired DAC version + read -p "What release version of DAC do you want to use? " -r IAC_VERSION + git clone --branch $IAC_VERSION https://github.com/sassoftware/viya4-deployment.git +fi + +echo +read -p "What is your aws account id? " -r AWS_ACCT_ID +read -p "What is your aws region? " -r AWS_REGION + +echo -e "\n+++Modding viya4-deployment/roles/vdm/templates/resources/openldap.yaml ..." 
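# The heredoc below overwrites roles/vdm/templates/resources/openldap.yaml in the
# local viya4-deployment clone so that the openldap container and the ldap-init
# initContainer both pull osixia/openldap:1.3.0 from the private ECR registry
# (${AWS_ACCT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com) instead of Docker Hub,
# which is unreachable from a dark-site cluster.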
+ +tee viya4-deployment/roles/vdm/templates/resources/openldap.yaml > /dev/null << EOF +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: openldap +spec: + replicas: 1 + selector: + matchLabels: + app: openldap + template: + metadata: + labels: + app: openldap + spec: + hostname: ldap-svc + imagePullSecrets: [] + containers: + - image: "${AWS_ACCT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/osixia/openldap:1.3.0" + imagePullPolicy: IfNotPresent + name: openldap + ports: + - containerPort: 389 + args: + - --copy-service + env: + - name: LDAP_TLS + valueFrom: + configMapKeyRef: + name: openldap-bootstrap-config + key: LDAP_TLS + - name: LDAP_ADMIN_PASSWORD + valueFrom: + configMapKeyRef: + name: openldap-bootstrap-config + key: LDAP_ADMIN_PASSWORD + - name: LDAP_DOMAIN + valueFrom: + configMapKeyRef: + name: openldap-bootstrap-config + key: LDAP_DOMAIN + - name: LDAP_REMOVE_CONFIG_AFTER_SETUP + valueFrom: + configMapKeyRef: + name: openldap-bootstrap-config + key: LDAP_REMOVE_CONFIG_AFTER_SETUP + - name: DISABLE_CHOWN + valueFrom: + configMapKeyRef: + name: openldap-bootstrap-config + key: DISABLE_CHOWN + volumeMounts: + - name: bootstrap-custom + mountPath: "/container/service/slapd/assets/config/bootstrap/ldif/custom" + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: workload.sas.com/class + operator: In + values: + - stateless + matchFields: [] + weight: 100 + - preference: + matchExpressions: + - key: workload.sas.com/class + operator: NotIn + values: + - compute + - cas + - stateful + - connect + matchFields: [] + weight: 50 + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.azure.com/mode + operator: NotIn + values: + - system + matchFields: [] + tolerations: + - effect: NoSchedule + key: workload.sas.com/class + operator: Equal + value: stateful + - effect: NoSchedule + key: workload.sas.com/class + operator: Equal + value: stateless + volumes: + - name: bootstrap-custom + emptyDir: {} + - name: ldap-bootstrap-config + configMap: + name: openldap-bootstrap-config + items: + - key: LDAP_USERS_CONF + path: 07-testUsers.ldif + mode: 0664 + - key: LDAP_GROUPS_CONF + path: 06-testGroups.ldif + mode: 0664 + initContainers: + - name: ldap-init + image: "${AWS_ACCT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/osixia/openldap:1.3.0" + command: + - bash + - -c + - "cp -avRL /tmp/ldif/custom/* /container/service/slapd/assets/config/bootstrap/ldif/custom/" + volumeMounts: + - name: bootstrap-custom + mountPath: "/container/service/slapd/assets/config/bootstrap/ldif/custom" + - name: ldap-bootstrap-config + mountPath: "/tmp/ldif/custom" +--- +apiVersion: v1 +kind: Service +metadata: + name: ldap-svc +spec: + ports: + - port: 389 + protocol: TCP + targetPort: 389 + name: ldap + selector: + app: openldap +EOF + +echo -e "\n+++Mod complete!" + +# build modded viya4-deployment docker container? +echo +read -p "Would you like to build the modded viya4-deployment docker container (y/n)? " -n 1 -r REPLY +echo # (optional) move to a new line +if [[ $REPLY =~ ^[Yy]$ ]]; then + read -p " What tag would you like to use for the modded container? " -r TAG + docker build -t viya4-deployment:$TAG viya4-deployment/ + echo -e "\n+++Modded docker container is: viya4-deployment:${TAG}" +fi + +# push modded docker container to ECR +echo +read -p "Would you like to push the viya4-deployment:${TAG} docker container to ECR (y/n)? 
" -n 1 -r REPLY +echo # (optional) move to a new line +if [[ ! $REPLY =~ ^[Yy]$ ]]; then + exit 1 +fi + +aws ecr create-repository --no-cli-pager --repository-name viya4-deployment + +docker tag viya4-deployment:$TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/viya4-deployment:$TAG + +aws ecr get-login-password --no-cli-pager --region $AWS_REGION | $DOCKER_SUDO docker login --username AWS --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com + +docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/viya4-deployment:$TAG \ No newline at end of file diff --git a/viya4-deployment-darksite/deployment-machine-assets/01_iac_deploy.sh b/viya4-deployment-darksite/deployment-machine-assets/01_iac_deploy.sh new file mode 100755 index 00000000..142550ff --- /dev/null +++ b/viya4-deployment-darksite/deployment-machine-assets/01_iac_deploy.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +# what is the tag +read -p "What is the tag for your viya4-iac-aws container? " -r TAG +# what is the job +read -p "What type of IaC job: plan, apply, or destroy? " -r REPLY + +# preview job +if [ $REPLY == "plan" ]; then + echo -e "\n+++Starting plan job ...\n" + docker run --rm \ + --group-add root \ + --user "$(id -u):$(id -g)" \ + --volume=$(pwd)/infrastructure:/workspace \ + viya4-iac-aws:$TAG \ + plan -var-file=/workspace/terraform.tfvars \ + -state=/workspace/terraform.tfstate +fi + +# apply job +if [ $REPLY == "apply" ]; then + echo -e "\n+++Starting apply job ...\n" + docker run --rm \ + --group-add root \ + --user "$(id -u):$(id -g)" \ + --volume=$(pwd)/infrastructure:/workspace \ + viya4-iac-aws:$TAG \ + apply -auto-approve -var-file=/workspace/terraform.tfvars \ + -state=/workspace/terraform.tfstate + + # Update the kubeconfig using aws cli and place here on deploy machine: ~/.kube/config + aws eks update-kubeconfig --name darksite-lab-eks + rm /home/$USER/viya/infrastructure/darksite-lab-eks-kubeconfig.conf +fi + +# destroy job +if [ $REPLY == "destroy" ]; then + read -p "Are you sure you want to continue (y/n)? " -n 1 -r REPLY + echo # (optional) move to a new line + if [[ ! $REPLY =~ ^[Yy]$ ]]; then + exit 1 + fi + echo -e "\n+++Starting destroy job ...\n" + docker run --rm \ + --group-add root \ + --user "$(id -u):$(id -g)" \ + --volume=$(pwd)/infrastructure:/workspace \ + viya4-iac-aws:$TAG \ + destroy -auto-approve -var-file=/workspace/terraform.tfvars \ + -state=/workspace/terraform.tfstate +fi \ No newline at end of file diff --git a/viya4-deployment-darksite/deployment-machine-assets/02_dac_deploy.sh b/viya4-deployment-darksite/deployment-machine-assets/02_dac_deploy.sh new file mode 100755 index 00000000..01fff311 --- /dev/null +++ b/viya4-deployment-darksite/deployment-machine-assets/02_dac_deploy.sh @@ -0,0 +1,120 @@ +#!/bin/bash + +# get viya4-deployment container tag +echo -e "\n" +read -p "What is your viya4-deployment container tag? " -r DOCKER_TAG + +TASKS=("baseline" "viya" "cluster-logging" "cluster-monitoring" "viya-monitoring" "install" "uninstall") + +##### FUNCTIONS ##### +function docker_run() { + echo "starting $tags job..." 
+ docker run --rm \ + --group-add root \ + --user $(id -u):$(id -g) \ + --volume $(pwd)/infrastructure/ssh/id_rsa:/config/jump_svr_private_key \ + --volume $(pwd)/infrastructure/terraform.tfstate:/config/tfstate \ + --volume /home/ec2-user/.kube/config:/.kube/config \ + --volume $(pwd)/software/deployments:/data \ + --volume $(pwd)/software/viya_order_assets:/viya_order_assets \ + --volume $(pwd)/software/ansible-vars-iac.yaml:/config/config \ + --volume $(pwd)/software/ingress:/ingress \ + --volume $(pwd)/software/sitedefault.yaml:/sitedefault/sitedefault.yaml \ + viya4-deployment:$DOCKER_TAG --tags "$tags" +} + +function join_by { + local d=${1-} f=${2-} + if shift 2; then + printf %s "$f" "${@/#/$d}" + fi +} + +##### MAIN SCRIPT ##### +if [ $# -eq 0 ] +then + # what are the deploy tags + echo + echo "You didn't provide deployment tags!" + echo + echo "Tasks: baseline viya cluster-logging cluster-monitoring viya-monitoring" + echo "Actions: install uninstall" + echo + echo ' -All tasks and actions must be separated by "," ' + echo " -At least one task must be supplied. Multiple tasks are allowed. " + echo " -An action is required and must be the last and ONLY action provided." + echo + echo "Examples: baseline,viya,install" + echo " viya,uninstall " + echo + echo -n "What are your deployment tags? " + read -r REPLY +else + REPLY=$* +fi + +# split REPLY into an array +IFS=',' read -r -a array <<< "$REPLY" +# remove spaces in array elements +clean=() +for i in "${array[@]}"; do + i=${i// /} + clean+=("$i") +done + +# check if provided tasks are valid +for i in "${clean[@]}"; do + inarray=$(echo ${TASKS[@]} | grep -ow "$i" | wc -w) + if [ $inarray == 0 ]; then + echo $i "is not a valid input." + exit 0 + fi +done + +# check that more than one tag is provided +len=${#clean[@]} +if [ $len -lt 2 ]; then + echo "Not enough tags provided!" + exit 0 +fi + +# check if install and uninstall is provided correctly +count=0 +for i in "${clean[@]}"; do + if [ $i == "install" ] || [ $i == "uninstall" ]; then + (( count++ )) + fi +done +if [ $count == 0 ]; then + echo "You didn't provide an install or uninstall action!" + exit 0 +elif [ $count -gt 1 ]; then + echo "You can only have one action: install or uninstall!" + exit 0 +fi +# check that install/uninstall is last value +last="${clean[-1]}" +if [ "$last" != "install" ] && [ "$last" != "uninstall" ]; then + echo "install or uninstall must be last tag value!" + exit 0 +fi + +# if uninstall job, double check before continuing! +if [ "$last" == "uninstall" ]; then + read -p "Are you really sure you want to continue; this action is destructive!! (y/n)? " -n 1 -r REPLY + echo # (optional) move to a new line + if [[ ! $REPLY =~ ^[Yy]$ ]]; then + exit 1 + fi +fi + +# all checks passed so build the tags string +tags=$(join_by , ${clean[*]}) + +# run the function +docker_run + +# remove downloaded assets +if [ -f software/deployments/darksite-lab-eks/viya/SASViyaV4*.tgz ]; then + rm software/deployments/darksite-lab-eks/viya/SASViyaV4*.tgz +fi diff --git a/viya4-deployment-darksite/deployment-machine-assets/infrastructure/terraform.tfvars b/viya4-deployment-darksite/deployment-machine-assets/infrastructure/terraform.tfvars new file mode 100755 index 00000000..856f683a --- /dev/null +++ b/viya4-deployment-darksite/deployment-machine-assets/infrastructure/terraform.tfvars @@ -0,0 +1,111 @@ +# !NOTE! - These are only a subset of the variables in CONFIG-VARS.md provided +# as examples. 
Customize this file to add any variables from CONFIG-VARS.md whose +# default values you want to change. + +# **************** REQUIRED VARIABLES **************** +# These required variables' values MUST be provided by the User +prefix = "darksite-lab" +location = "" # e.g., "us-east-1" +# **************** REQUIRED VARIABLES **************** + +# Bring your own existing resources - get values from AWS console or VPC/Subnet provisioning script outputs +vpc_id = "PrivateVPCId" +subnet_ids = { # only needed if using pre-existing subnets + "public" : ["PrivateSubnetAId", "PrivateSubnetBId"], + "private" : ["PrivateSubnetAId", "PrivateSubnetBId"], + "control_plane" : ["ControlPlaneSubnetAId", "ControlPlaneSubnetBId"], + "database" : ["PrivateSubnetAId", "PrivateSubnetBId"] # only when 'create_postgres=true' +} + +security_group_id = "PrivateVpcSGId" +cluster_security_group_id = "PrivateClusterControlSGId" +workers_security_group_id = "PrivateClusterWorkersSGId" + +# !NOTE! - Without specifying your CIDR block access rules, ingress traffic +# to your cluster will be blocked by default. + +# ************** RECOMMENDED VARIABLES *************** +default_public_access_cidrs = [] # e.g., ["123.45.6.89/32"] # not required in a darksite +ssh_public_key = "/workspace/ssh/id_rsa.pub" # container path to ssh public key used for jumpserver +# ************** RECOMMENDED VARIABLES *************** + +# Tags for all tagable items in your cluster. +tags = { } # e.g., { "key1" = "value1", "key2" = "value2" } + +# Postgres config - By having this entry a database server is created. If you do not +# need an external database server remove the 'postgres_servers' +# block below. +# postgres_servers = { +# default = {}, +# } + +## Cluster config +cluster_api_mode = "private" +kubernetes_version = "1.26" +default_nodepool_node_count = 1 +default_nodepool_vm_type = "m5.2xlarge" + +## General +storage_type = "standard" +nfs_raid_disk_type = "gp3" +nfs_raid_disk_iops = "3000" + +## Cluster Node Pools config +node_pools = { + cas = { + "vm_type" = "m5.2xlarge" + "cpu_type" = "AL2_x86_64" + "os_disk_type" = "gp3" + "os_disk_size" = 200 + "os_disk_iops" = 3000 + "min_nodes" = 1 + "max_nodes" = 5 + "node_taints" = ["workload.sas.com/class=cas:NoSchedule"] + "node_labels" = { + "workload.sas.com/class" = "cas" + } + "custom_data" = "" + "metadata_http_endpoint" = "enabled" + "metadata_http_tokens" = "required" + "metadata_http_put_response_hop_limit" = 1 + }, + compute = { + "vm_type" = "m5.8xlarge" + "cpu_type" = "AL2_x86_64" + "os_disk_type" = "gp3" + "os_disk_size" = 200 + "os_disk_iops" = 3000 + "min_nodes" = 1 + "max_nodes" = 5 + "node_taints" = ["workload.sas.com/class=compute:NoSchedule"] + "node_labels" = { + "workload.sas.com/class" = "compute" + "launcher.sas.com/prepullImage" = "sas-programming-environment" + } + "custom_data" = "" + "metadata_http_endpoint" = "enabled" + "metadata_http_tokens" = "required" + "metadata_http_put_response_hop_limit" = 1 + }, + services = { + "vm_type" = "m5.4xlarge" + "cpu_type" = "AL2_x86_64" + "os_disk_type" = "gp3" + "os_disk_size" = 200 + "os_disk_iops" = 3000 + "min_nodes" = 0 + "max_nodes" = 5 + "node_taints" = ["workload.sas.com/class=stateful:NoSchedule"] + "node_labels" = { + "workload.sas.com/class" = "stateful" + } + "custom_data" = "" + "metadata_http_endpoint" = "enabled" + "metadata_http_tokens" = "required" + "metadata_http_put_response_hop_limit" = 1 + } +} + +# Jump Server +create_jump_vm = true +create_jump_public_ip = false diff --git 
a/viya4-deployment-darksite/deployment-machine-assets/software/ansible-vars-iac.yaml b/viya4-deployment-darksite/deployment-machine-assets/software/ansible-vars-iac.yaml new file mode 100755 index 00000000..ee7cc7d0 --- /dev/null +++ b/viya4-deployment-darksite/deployment-machine-assets/software/ansible-vars-iac.yaml @@ -0,0 +1,200 @@ +## Cluster +NAMESPACE: viya + +## MISC +DEPLOY: true # Set to false to stop at generating the manifest +LOADBALANCER_SOURCE_RANGES: ['192.168.8.0/24'] +KUBECONFIG: /.kube/config +V4_DEPLOYMENT_OPERATOR_ENABLED: false # sas-orchestration does not phone home for entitlements (set to false for darksite) + +## Storage +V4_CFG_MANAGE_STORAGE: true +#V4_CFG_RWX_FILESTORE_PATH: "/" # NOTE: EFS is "/" but NFS is "/export" (for NFS) + +## SAS Software Order +V4_CFG_ORDER_NUMBER: # order number +V4_CFG_CADENCE_NAME: # stable or lts +V4_CFG_CADENCE_VERSION: # cadence version +## Providing the following three variables will bypass DAC using SAS Viya API (DAC 6.2.0+): +V4_CFG_DEPLOYMENT_ASSETS: /viya_order_assets/ # container path to deployment assets +V4_CFG_LICENSE: /viya_order_assets/ # container path to license file (.jwt) +V4_CFG_CERTS: /viya_order_assets/ # container path to viya certs + +## Path to sitedefault.yaml +V4_CFG_SITEDEFAULT: /sitedefault/sitedefault.yaml # container path to sitedefault.yaml + +## CR Access +V4_CFG_CR_URL: "{{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/viya" # update this for your account and region + +## Ingress +V4_CFG_INGRESS_TYPE: ingress +V4_CFG_INGRESS_MODE: "private" +# if no FQDN dns registration, use the DNS of the private NLB, here is a way to get that automatically: +# V4_CFG_INGRESS_FQDN: $(kubectl get service ingress-nginx-controller -n ingress-nginx -o jsonpath={'.status.loadBalancer.ingress[0].ip'}) +V4_CFG_INGRESS_FQDN: +V4_CFG_TLS_MODE: "full-stack" # [full-stack|front-door|ingress-only|disabled] + +## Postgres +V4_CFG_POSTGRES_SERVERS: + default: + internal: true + postgres_pvc_storage_size: 10Gi + postgres_pvc_access_mode: ReadWriteOnce + postgres_storage_class: sas + backrest_storage_class: sas + +## LDAP +V4_CFG_EMBEDDED_LDAP_ENABLE: true # Note: will require the DaC tool (openldap deployment) to be modded to point to ECR for openldap container image + +## Baseline configs are specifically for repos that use OCI for helm charts (like ECR) + +## Cert-manager config +CERT_MANAGER_CHART_URL: "" # yes we want this blank because of how the ansible helm module expects OCI to be passed +CERT_MANAGER_CHART_NAME: oci://{{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/cert-manager +CERT_MANAGER_CONFIG: + image: + repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/quay.io/jetstack/cert-manager-controller + webhook: + image: + repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/quay.io/jetstack/cert-manager-webhook + cainjector: + image: + repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/quay.io/jetstack/cert-manager-cainjector + startupapicheck: + image: + repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/quay.io/jetstack/cert-manager-ctl + installCRDs: "true" + extraArgs: + - --enable-certificate-owner-ref=true + +## Metrics-server config +METRICS_SERVER_CHART_URL: "" # yes we want this blank because of how the ansible helm module expects OCI to be passed +METRICS_SERVER_CHART_NAME: oci://{{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/metrics-server +METRICS_SERVER_CONFIG: + image: + registry: {{ AWS_ACCT_ID }}.dkr.ecr.{{ 
AWS_REGION }}.amazonaws.com + repository: metrics-server + apiService: + create: true + +## NGINX config +INGRESS_NGINX_CHART_URL: "" # yes we want this blank because of how the ansible helm module expects OCI to be passed +INGRESS_NGINX_CHART_NAME: oci://{{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/ingress-nginx +INGRESS_NGINX_CONFIG: + controller: + image: + registry: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com + image: ingress-nginx/controller + digest: {{ CONTROLLER_ECR_IMAGE_DIGEST }} + admissionWebhooks: + patch: + image: + registry: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com + image: ingress-nginx/kube-webhook-certgen + digest: {{ WEBHOOK_ECR_IMAGE_DIGEST }} + service: + externalTrafficPolicy: Local + sessionAffinity: None + loadBalancerSourceRanges: "{{ LOADBALANCER_SOURCE_RANGES |default(['0.0.0.0/0'], -1) }}" + config: + use-forwarded-headers: "true" + hsts-max-age: "63072000" + tcp: {} + udp: {} + lifecycle: + preStop: + exec: + command: ["/bin/sh", "-c", "sleep 5; /usr/local/nginx/sbin/nginx -c /etc/nginx/nginx.conf -s quit; while pgrep -x nginx; do sleep 1; done"] + terminationGracePeriodSeconds: 600 + +# nfs client config +NFS_CLIENT_CHART_URL: "" # yes we want this blank because of how the ansible helm module expects OCI to be passed +NFS_CLIENT_CHART_NAME: oci://{{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/nfs-subdir-external-provisioner +NFS_CLIENT_CONFIG: + image: + repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/nfs-subdir-external-provisioner + nfs: + server: "{{ V4_CFG_RWX_FILESTORE_ENDPOINT }}" + path: "{{ V4_CFG_RWX_FILESTORE_PATH | replace('/$', '') }}/pvs" + mountOptions: + - noatime + - nodiratime + - 'rsize=262144' + - 'wsize=262144' + storageClass: + archiveOnDelete: "false" + name: sas + +# pg-storage class config +PG_NFS_CLIENT_CHART_URL: "" # yes we want this blank because of how the ansible helm module expects OCI to be passed +PG_NFS_CLIENT_CHART_NAME: oci://{{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/nfs-subdir-external-provisioner +PG_NFS_CLIENT_CONFIG: + image: + repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/nfs-subdir-external-provisioner + nfs: + server: "{{ V4_CFG_RWX_FILESTORE_ENDPOINT }}" + path: "{{ V4_CFG_RWX_FILESTORE_PATH | replace('/$', '') }}/pvs" + mountOptions: + - noatime + - nodiratime + - 'rsize=262144' + - 'wsize=262144' + storageClass: + archiveOnDelete: "false" + reclaimPolicy: "Retain" + name: pg-storage + +# auto-scaler +CLUSTER_AUTOSCALER_CHART_URL: "" # yes we want this blank because of how the ansible helm module expects OCI to be passed +CLUSTER_AUTOSCALER_CHART_NAME: oci://{{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/cluster-autoscaler +CLUSTER_AUTOSCALER_LOCATION: {{ AWS_REGION }} +CLUSTER_AUTOSCALER_CONFIG: + image: + repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/cluster-autoscaler + awsRegion: "{{ CLUSTER_AUTOSCALER_LOCATION }}" + autoDiscovery: + clusterName: "{{ CLUSTER_NAME }}" + rbac: + serviceAccount: + name: cluster-autoscaler + annotations: + "eks.amazonaws.com/role-arn": "{{ CLUSTER_AUTOSCALER_ACCOUNT }}" + "eks.amazonaws.com/sts-regional-endpoints": “true” + extraEnv: + AWS_STS_REGIONAL_ENDPOINTS: regional + extraArgs: + aws-use-static-instance-list: true # this keeps autoscaler from going to the internet for the ec2 list on init, auto-scaler will fail in darksite without this + +# EBS CSI DRIVER +EBS_CSI_DRIVER_CHART_URL: "" # yes we want this blank because of how the ansible 
+EBS_CSI_DRIVER_CHART_URL: "" # yes we want this blank because of how the ansible helm module expects OCI to be passed
+EBS_CSI_DRIVER_CHART_NAME: oci://{{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/aws-ebs-csi-driver
+EBS_CSI_DRIVER_LOCATION: {{ AWS_REGION }}
+EBS_CSI_DRIVER_CONFIG:
+  image:
+    repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/public.ecr.aws/ebs-csi-driver/aws-ebs-csi-driver
+  sidecars:
+    provisioner:
+      image:
+        repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/k8s.gcr.io/sig-storage/csi-provisioner
+    attacher:
+      image:
+        repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/k8s.gcr.io/sig-storage/csi-attacher
+    snapshotter:
+      image:
+        repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/k8s.gcr.io/sig-storage/csi-snapshotter
+    livenessProbe:
+      image:
+        repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/k8s.gcr.io/sig-storage/livenessprobe
+    resizer:
+      image:
+        repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/k8s.gcr.io/sig-storage/csi-resizer
+    nodeDriverRegistrar:
+      image:
+        repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/k8s.gcr.io/sig-storage/csi-node-driver-registrar
+  controller:
+    region: "{{ EBS_CSI_DRIVER_LOCATION }}"
+    serviceAccount:
+      create: true
+      name: ebs-csi-controller-sa
+      annotations:
+        "eks.amazonaws.com/role-arn": "{{ EBS_CSI_DRIVER_ACCOUNT }}"
diff --git a/viya4-deployment-darksite/deployment-machine-assets/software/sitedefault.yaml b/viya4-deployment-darksite/deployment-machine-assets/software/sitedefault.yaml
new file mode 100755
index 00000000..6a84adc2
--- /dev/null
+++ b/viya4-deployment-darksite/deployment-machine-assets/software/sitedefault.yaml
@@ -0,0 +1,26 @@
+cacerts:
+config:
+  application:
+    sas.identities.providers.ldap.connection:
+      host: ldap-svc
+      password: Password123
+      port: 389
+      url: ldap://${sas.identities.providers.ldap.connection.host}:${sas.identities.providers.ldap.connection.port}
+      userDN: cn=admin,dc=example,dc=com
+    sas.identities.providers.ldap.group:
+      baseDN: ou=groups,dc=example,dc=com
+      accountId: cn
+      member: uniqueMember
+      memberOf: memberOf
+      objectClass: groupOfUniqueNames
+      objectFilter: (objectClass=groupOfUniqueNames)
+      searchFilter: cn={0}
+    sas.identities.providers.ldap.user:
+      baseDN: ou=people,dc=example,dc=com
+      accountId: uid
+      memberOf: memberOf
+      objectClass: inetOrgPerson
+      objectFilter: (objectClass=inetOrgPerson)
+      searchFilter: uid={0}
+    sas.logon.initial.password: Password123
+config/identities/sas.identities/administrator: viya_admin
\ No newline at end of file
diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/00_vars.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/00_vars.sh
new file mode 100644
index 00000000..7cde5e20
--- /dev/null
+++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/00_vars.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+AWS_ACCT_ID=
+AWS_REGION=
+
+K8S_minor_version=24 # K8s v1.22.X minor would be 22 ... K8s v1.21.X minor version would be 21. This must match your deployment!
\ No newline at end of file
diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/README.md b/viya4-deployment-darksite/install-baseline-helm-from-ecr/README.md
new file mode 100644
index 00000000..3c75be84
--- /dev/null
+++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/README.md
@@ -0,0 +1,37 @@
+# Gather some facts before running these scripts:
+
+## Global Variables (00_vars.sh)
+1. AWS Account ID
+2. AWS Region
+3. K8s minor version
+
+## metrics-server
+1. helm chart version
+
+## auto-scaler
+1. helm chart version
+2. Cluster name
+3. Autoscaler ARN
+
+## ingress-nginx
+1. helm chart version
+2. controller image digest (sha256) - get this from your ECR
+3. webhook image digest (sha256) - get this from your ECR
+4. load balancer source ranges (must be a list, example: ["0.0.0.0/0"])
+
+## nfs-subdir-external-provisioner
+1. nfs-subdir-external-provisioner helm chart version
+2. RWX filestore endpoint (IP or DNS for the endpoint)
+3. RWX filestore path (don't include ../pvs as it is already appended in the script)
+
+## pg-nfs-provisioner
+1. helm chart version
+2. RWX filestore endpoint (IP or DNS for the endpoint)
+3. RWX filestore path (don't include ../pvs as it is already appended in the script)
+
+## cert-manager
+1. helm chart version (don't include the preceding v, it is already appended in the script)
+
+## ebs-csi-driver
+1. helm chart version
+2. eks.amazonaws.com/role-arn for EBS_CSI_DRIVER
\ No newline at end of file
diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/auto_scaler_install.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/auto_scaler_install.sh
new file mode 100644
index 00000000..99767e64
--- /dev/null
+++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/auto_scaler_install.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+source 00_vars.sh
+
+# helm registry login
+aws ecr get-login-password \
+  --region $AWS_REGION | helm registry login \
+  --username AWS \
+  --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
+
+read -p "cluster name: " cluster_name
+read -p "autoscaler ARN: " autoscaler_arn
+read -p "cluster-autoscaler helm chart version: " CHART_VERSION
+
+# output tmp.yaml
+read -r -d '' TMP_YAML < tmp.yaml
+
+# helm install
+echo -e "Installing auto-scaler...\n\n"
+helm upgrade --cleanup-on-fail \
+  --install cluster-autoscaler oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/cluster-autoscaler \
+  --version=$CHART_VERSION \
+  --values tmp.yaml
+
+# cleanup
+unset TMP_YAML
+rm tmp.yaml
\ No newline at end of file
diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/cert_manager_install.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/cert_manager_install.sh
new file mode 100644
index 00000000..1b850f96
--- /dev/null
+++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/cert_manager_install.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+source 00_vars.sh
+
+# helm registry login
+aws ecr get-login-password \
+  --region $AWS_REGION | helm registry login \
+  --username AWS \
+  --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
+
+read -p 'cert-manager helm chart version (do not include the preceding "v"): ' CHART_VERSION
+
+# output tmp.yaml
+read -r -d '' TMP_YAML < tmp.yaml
+
+echo -e "Installing cert-manager...\n\n"
+
+helm upgrade --cleanup-on-fail \
+  --install cert-manager oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/cert-manager \
+  --version=v$CHART_VERSION \
+  --values tmp.yaml \
+  --namespace cert-manager \
+  --create-namespace
+
+# cleanup
+unset TMP_YAML
+rm tmp.yaml
\ No newline at end of file
diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/ebs-csi-driver.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/ebs-csi-driver.sh
new file mode 100644
index 00000000..3bff5d90
--- /dev/null
+++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/ebs-csi-driver.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+
+## installs ebs-csi-driver via helm
+
+source 00_vars.sh
+
+# helm registry login
+aws ecr get-login-password \
+  --region $AWS_REGION | helm registry login \
+  --username AWS \
+  --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
+
+read -p "What is the aws-ebs-csi-driver helm chart version? " CHART_VERSION
+read -p "What is the eks.amazonaws.com/role-arn for EBS_CSI_DRIVER? " ARN
+
+read -r -d '' TMP_YAML < tmp.yaml
+
+# helm install
+echo -e "\nInstalling aws-ebs-csi-driver...\n"
+helm upgrade --cleanup-on-fail \
+  --install aws-ebs-csi-driver oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/aws-ebs-csi-driver --version=$CHART_VERSION \
+  --values tmp.yaml \
+  --namespace kube-system
+
+# cleanup
+unset TMP_YAML
+rm tmp.yaml
\ No newline at end of file
diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/ingress_nginx_install.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/ingress_nginx_install.sh
new file mode 100644
index 00000000..f31fdcda
--- /dev/null
+++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/ingress_nginx_install.sh
@@ -0,0 +1,77 @@
+#!/bin/bash
+
+## installs this by default:
+# - INGRESS_NGINX_CVE_2021_25742_PATCH
+# - ingress-nginx private ingress
+
+source 00_vars.sh
+
+# helm registry login
+aws ecr get-login-password \
+  --region $AWS_REGION | helm registry login \
+  --username AWS \
+  --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
+
+read -p "ingress-nginx helm chart version: " CHART_VERSION
+read -p "controller image digest (sha256): " CONTROLLER_DIGEST
+read -p "webhook image digest (sha256): " WEBHOOK_DIGEST
+read -p 'load balancer source ranges? must be a list (example): ["0.0.0.0/0"] ' LB
+
+# handle version differences with webhook path
+CHART_VERSION_INT=$(echo "${CHART_VERSION//.}")
+if [ $CHART_VERSION_INT -lt 411 ]; then
+  WEBHOOK_PATH=jettech
+elif [ $CHART_VERSION_INT -ge 411 ]; then
+  WEBHOOK_PATH=ingress-nginx
+else
+  echo "Error with your helm chart versions! Exiting..."
+  exit 1
+fi
+
+read -r -d '' TMP_YAML < tmp.yaml
+
+# helm install
+echo -e "\nInstalling ingress-nginx...\n"
+helm upgrade --cleanup-on-fail \
+  --install ingress-nginx oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/ingress-nginx --version=$CHART_VERSION \
+  --values tmp.yaml \
+  --namespace ingress-nginx \
+  --create-namespace
+
+# cleanup
+unset TMP_YAML
+rm tmp.yaml
\ No newline at end of file
diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/metrics_server_install.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/metrics_server_install.sh
new file mode 100644
index 00000000..029290f9
--- /dev/null
+++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/metrics_server_install.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+source 00_vars.sh
+
+# helm registry login
+aws ecr get-login-password \
+  --region $AWS_REGION | helm registry login \
+  --username AWS \
+  --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
+
+read -p "metrics-server helm chart version: " CHART_VERSION
+
+echo -e "Installing metrics-server...\n\n"
+
+helm upgrade --cleanup-on-fail \
+  --install metrics-server oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/metrics-server --version=$CHART_VERSION \
+  --set image.registry=$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com \
+  --set image.repository=metrics-server \
+  --set apiService.create=true
\ No newline at end of file
diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/nfs_provisioner_install.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/nfs_provisioner_install.sh
new file mode 100644
index 00000000..d9c3bcf1
--- /dev/null
+++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/nfs_provisioner_install.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+source 00_vars.sh
+
+# helm registry login
+aws ecr get-login-password \
+  --region $AWS_REGION | helm registry login \
+  --username AWS \
+  --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
+
+read -p "nfs-subdir-external-provisioner helm chart version: " CHART_VERSION
+read -p "RWX filestore endpoint: " ENDPOINT
+read -p "RWX filestore path (don't include ../pvs): " ENDPOINT_PATH
+
+# output tmp.yaml
+read -r -d '' TMP_YAML < tmp.yaml
+
+echo -e "Installing nfs-subdir-external-provisioner...\n\n"
+
+helm upgrade --cleanup-on-fail \
+  --install nfs-subdir-external-provisioner oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/nfs-subdir-external-provisioner \
+  --version=$CHART_VERSION \
+  --values tmp.yaml \
+  --namespace nfs-client \
+  --create-namespace
+
+# cleanup
+unset TMP_YAML
+rm tmp.yaml
\ No newline at end of file
diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/pg_nfs_provisioner_install.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/pg_nfs_provisioner_install.sh
new file mode 100644
index 00000000..0e704125
--- /dev/null
+++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/pg_nfs_provisioner_install.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+source 00_vars.sh
+
+# helm registry login
+aws ecr get-login-password \
+  --region $AWS_REGION | helm registry login \
+  --username AWS \
+  --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
+
+read -p "nfs-subdir-external-provisioner helm chart version: " CHART_VERSION
+read -p "RWX filestore endpoint: " ENDPOINT
+read -p "RWX filestore path (don't include ../pvs): " ENDPOINT_PATH
+
+# output tmp.yaml
+read -r -d '' TMP_YAML < tmp.yaml
+
+echo -e "Installing nfs-subdir-external-provisioner...\n\n"
+
+helm upgrade --cleanup-on-fail \
+  --install nfs-subdir-external-provisioner-pg-storage oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/nfs-subdir-external-provisioner \
+  --version=$CHART_VERSION \
+  --values tmp.yaml \
+  --namespace nfs-client \
+  --create-namespace
+
+# cleanup
+unset TMP_YAML
+rm tmp.yaml
\ No newline at end of file
diff --git a/viya4-deployment-darksite/mirrormgr-to-ecr/00_vars.sh b/viya4-deployment-darksite/mirrormgr-to-ecr/00_vars.sh
new file mode 100644
index 00000000..57ade5eb
--- /dev/null
+++ b/viya4-deployment-darksite/mirrormgr-to-ecr/00_vars.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+NAMESPACE= # namespace used for your viya install
+AWS_ACCT_ID= # your aws account ID
+REGION= # your aws region
+CERTS=~/viya/software/viya_order_assets/SASViyaV4_XXXX_certs.zip # path to the _certs.zip file
+ASSETS=~/viya/software/viya_order_assets/SASViyaV4_XXX_XXXX-XXXX_deploymentAssets.tgz # path to the tgz assets file
\ No newline at end of file
diff --git a/viya4-deployment-darksite/mirrormgr-to-ecr/01_mirrormgr-ecr.sh b/viya4-deployment-darksite/mirrormgr-to-ecr/01_mirrormgr-ecr.sh
new file mode 100644
index 00000000..b5fcf3ff
--- /dev/null
+++ b/viya4-deployment-darksite/mirrormgr-to-ecr/01_mirrormgr-ecr.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+## mirrormgr must be installed and in $PATH prior to running this script
+## aws cli should be configured prior to running this script
+## place your downloaded assets in the assets/ folder
+
+### source variables from 00_vars.sh
+source 00_vars.sh
+
+
+# create repositories?
+echo
+read -p "Do you need to create the ECR repositories? (y/n)? " -n 1 -r REPLY
+echo # (optional) move to a new line
+if [[ $REPLY =~ ^[Yy]$ ]]; then
+  # check if ECR repositories exist and create
+  for repo in $(mirrormgr list target docker repos --deployment-data $CERTS --destination $NAMESPACE) ; do
+    aws ecr create-repository --repository-name $repo --region $REGION
+  done
+fi
+
+
+# proceed with mirroring images?
+echo
+read -p "Proceed with mirroring images? this will take some time... (y/n)? " -n 1 -r REPLY
+echo # (optional) move to a new line
+if [[ $REPLY =~ ^[Yy]$ ]]; then
+  # populate the repositories.. this will take some time!
+  mirrormgr mirror registry -p ./sas_repos \
+    --deployment-data $CERTS \
+    --deployment-assets $ASSETS \
+    --destination https://$AWS_ACCT_ID.dkr.ecr.$REGION.amazonaws.com/$NAMESPACE \
+    --username 'AWS' \
+    --password $(aws ecr get-login-password --region $REGION)
+fi
\ No newline at end of file
diff --git a/viya4-deployment-darksite/mirrormgr-to-ecr/02_cleanup-ecr.sh b/viya4-deployment-darksite/mirrormgr-to-ecr/02_cleanup-ecr.sh
new file mode 100644
index 00000000..f628fb57
--- /dev/null
+++ b/viya4-deployment-darksite/mirrormgr-to-ecr/02_cleanup-ecr.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+# this script will help you quickly clean up viya related ECR repos
+
+### source variables from 00_vars.sh
+source 00_vars.sh
+
+# get all the repos within the aws subscription
+REPOS=$(aws ecr describe-repositories --region $REGION)
+
+# delete the SAS Viya repos
+read -p "Are you sure you'd like to delete all SAS Viya repos and images (y/n)? " -n 1 -r REPLY
+echo # (optional) move to a new line
+if [[ $REPLY =~ ^[Yy]$ ]]; then
+  echo $REPOS | jq -r --arg keyword $NAMESPACE '.repositories[].repositoryName | select(. | contains($keyword))' | while read -r repo; do aws ecr delete-repository --repository-name $repo --force --no-cli-pager; done
+fi
+
+# delete the 3rd party repos
+read -p "Are you sure you'd like to delete all 3rd party SAS Viya related repos and images (y/n)? " -n 1 -r REPLY
+echo # (optional) move to a new line
+if [[ $REPLY =~ ^[Yy]$ ]]; then
+  echo $REPOS | jq -r --arg keyword cert-manager '.repositories[].repositoryName | select(. | contains($keyword))' | while read -r repo; do aws ecr delete-repository --repository-name $repo --force --no-cli-pager; done
+  echo $REPOS | jq -r --arg keyword cluster-autoscaler '.repositories[].repositoryName | select(. | contains($keyword))' | while read -r repo; do aws ecr delete-repository --repository-name $repo --force --no-cli-pager; done
+  echo $REPOS | jq -r --arg keyword ingress-nginx '.repositories[].repositoryName | select(. | contains($keyword))' | while read -r repo; do aws ecr delete-repository --repository-name $repo --force --no-cli-pager; done
+  echo $REPOS | jq -r --arg keyword nfs-subdir-external-provisioner '.repositories[].repositoryName | select(. | contains($keyword))' | while read -r repo; do aws ecr delete-repository --repository-name $repo --force --no-cli-pager; done
+  echo $REPOS | jq -r --arg keyword metrics-server '.repositories[].repositoryName | select(. | contains($keyword))' | while read -r repo; do aws ecr delete-repository --repository-name $repo --force --no-cli-pager; done
+  echo $REPOS | jq -r --arg keyword openldap '.repositories[].repositoryName | select(. | contains($keyword))' | while read -r repo; do aws ecr delete-repository --repository-name $repo --force --no-cli-pager; done
+fi
diff --git a/viya4-deployment-darksite/mirrormgr-to-ecr/README.md b/viya4-deployment-darksite/mirrormgr-to-ecr/README.md
new file mode 100644
index 00000000..c53de24f
--- /dev/null
+++ b/viya4-deployment-darksite/mirrormgr-to-ecr/README.md
@@ -0,0 +1,23 @@
+## Helper scripts for mirrormgr
+
+SAS documentation specific to using mirrormgr for AWS ECR is located [here](https://go.documentation.sas.com/doc/en/itopscdc/v_029/dplyml0phy0dkr/p0lexw9inr33ofn1tbo69twarhlx.htm).
+
+## Step 1: Download Order Assets
+- Download the order assets [here](https://my.sas.com/en/my-orders.html). Check all items under "order assets".
+
+## Step 2: Unzip to assets/ folder
+- Unzip the multipleAssets zip to the assets/ folder. If following the darksite-lab, place it in /home/ec2-user/viya/software/viya_order_assets.
+
+## Step 3: Install mirrormgr
+- Download [here](https://support.sas.com/en/documentation/install-center/viya/deployment-tools/4/mirror-manager.html).
+
+## Step 4: Update variables in 00_vars.sh
+
+## Step 5: Run 01_mirrormgr-ecr.sh
+- The script assumes your AWS CLI is already configured.
+- This script uses `mirrormgr` to create an AWS ECR repo for each viya4 image (an AWS requirement).
+- This script downloads the viya4 images locally and then, using `mirrormgr`, automatically pushes them to the appropriate ECR repos.
+  - This will take some time depending on your local bandwidth. Note: the images total roughly 120 GiB.
+
+## Helper script to help clean up ECR: 02_cleanup-ecr.sh
+- This script uses the AWS CLI to delete all the SAS Viya and 3rd party repositories and images. This makes life easier when you need to clean up AWS ECR.
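+
+## (Optional) Sanity-check the mirror
+A quick way to confirm the push worked is to count the repositories that now exist under your namespace prefix. This is only a suggested check, not part of the helper scripts; it assumes the same 00_vars.sh values and a configured AWS CLI.
+
+```bash
+#!/bin/bash
+source 00_vars.sh
+
+# count the ECR repositories whose names start with the viya namespace prefix
+aws ecr describe-repositories --region $REGION \
+  --query "repositories[?starts_with(repositoryName, '$NAMESPACE')].repositoryName" \
+  --output text | wc -w
+```
+
+The count should roughly match what `mirrormgr list target docker repos --deployment-data $CERTS --destination $NAMESPACE | wc -l` reports for your order.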