From d46a8e1f8c45054c0db6f40a113ee599ba153c63 Mon Sep 17 00:00:00 2001 From: "David.Houck" Date: Mon, 8 Apr 2024 21:48:42 -0400 Subject: [PATCH 01/13] feat: (IAC-1117) dark site deployment --- viya4-deployment-darksite/README.md | 60 ++++++ .../baseline-to-ecr/00_vars.sh | 10 + .../baseline-to-ecr/01_run_all.sh | 11 + .../baseline-to-ecr/README.md | 13 ++ .../baseline-to-ecr/auto_scaler.sh | 54 +++++ .../baseline-to-ecr/cert_manager.sh | 57 +++++ .../baseline-to-ecr/ebs_driver.sh | 85 ++++++++ .../baseline-to-ecr/ingress_nginx.sh | 62 ++++++ .../baseline-to-ecr/metrics_server.sh | 41 ++++ .../nfs_subdir_external_provisioner.sh | 40 ++++ .../baseline-to-ecr/openldap.sh | 26 +++ .../darksite-openldap-mod/README.md | 6 + .../darksite-openldap-mod.sh | 186 ++++++++++++++++ .../01_iac_deploy.sh | 51 +++++ .../02_dac_deploy.sh | 120 +++++++++++ .../infrastructure/terraform.tfvars | 111 ++++++++++ .../software/ansible-vars-iac.yaml | 200 ++++++++++++++++++ .../software/sitedefault.yaml | 26 +++ .../install-baseline-helm-from-ecr/00_vars.sh | 6 + .../install-baseline-helm-from-ecr/README.md | 37 ++++ .../auto_scaler_install.sh | 44 ++++ .../cert_manager_install.sh | 43 ++++ .../ebs-csi-driver.sh | 57 +++++ .../ingress_nginx_install.sh | 77 +++++++ .../metrics_server_install.sh | 23 ++ .../nfs_provisioner_install.sh | 44 ++++ .../pg_nfs_provisioner_install.sh | 45 ++++ .../mirrormgr-to-ecr/00_vars.sh | 7 + .../mirrormgr-to-ecr/01_mirrormgr-ecr.sh | 35 +++ .../mirrormgr-to-ecr/02_cleanup-ecr.sh | 28 +++ .../mirrormgr-to-ecr/README.md | 23 ++ 31 files changed, 1628 insertions(+) create mode 100644 viya4-deployment-darksite/README.md create mode 100644 viya4-deployment-darksite/baseline-to-ecr/00_vars.sh create mode 100644 viya4-deployment-darksite/baseline-to-ecr/01_run_all.sh create mode 100644 viya4-deployment-darksite/baseline-to-ecr/README.md create mode 100644 viya4-deployment-darksite/baseline-to-ecr/auto_scaler.sh create mode 100644 
viya4-deployment-darksite/baseline-to-ecr/cert_manager.sh create mode 100644 viya4-deployment-darksite/baseline-to-ecr/ebs_driver.sh create mode 100644 viya4-deployment-darksite/baseline-to-ecr/ingress_nginx.sh create mode 100644 viya4-deployment-darksite/baseline-to-ecr/metrics_server.sh create mode 100644 viya4-deployment-darksite/baseline-to-ecr/nfs_subdir_external_provisioner.sh create mode 100644 viya4-deployment-darksite/baseline-to-ecr/openldap.sh create mode 100644 viya4-deployment-darksite/darksite-openldap-mod/README.md create mode 100644 viya4-deployment-darksite/darksite-openldap-mod/darksite-openldap-mod.sh create mode 100755 viya4-deployment-darksite/deployment-machine-assets/01_iac_deploy.sh create mode 100755 viya4-deployment-darksite/deployment-machine-assets/02_dac_deploy.sh create mode 100755 viya4-deployment-darksite/deployment-machine-assets/infrastructure/terraform.tfvars create mode 100755 viya4-deployment-darksite/deployment-machine-assets/software/ansible-vars-iac.yaml create mode 100755 viya4-deployment-darksite/deployment-machine-assets/software/sitedefault.yaml create mode 100644 viya4-deployment-darksite/install-baseline-helm-from-ecr/00_vars.sh create mode 100644 viya4-deployment-darksite/install-baseline-helm-from-ecr/README.md create mode 100644 viya4-deployment-darksite/install-baseline-helm-from-ecr/auto_scaler_install.sh create mode 100644 viya4-deployment-darksite/install-baseline-helm-from-ecr/cert_manager_install.sh create mode 100644 viya4-deployment-darksite/install-baseline-helm-from-ecr/ebs-csi-driver.sh create mode 100644 viya4-deployment-darksite/install-baseline-helm-from-ecr/ingress_nginx_install.sh create mode 100644 viya4-deployment-darksite/install-baseline-helm-from-ecr/metrics_server_install.sh create mode 100644 viya4-deployment-darksite/install-baseline-helm-from-ecr/nfs_provisioner_install.sh create mode 100644 viya4-deployment-darksite/install-baseline-helm-from-ecr/pg_nfs_provisioner_install.sh create mode 
100644 viya4-deployment-darksite/mirrormgr-to-ecr/00_vars.sh create mode 100644 viya4-deployment-darksite/mirrormgr-to-ecr/01_mirrormgr-ecr.sh create mode 100644 viya4-deployment-darksite/mirrormgr-to-ecr/02_cleanup-ecr.sh create mode 100644 viya4-deployment-darksite/mirrormgr-to-ecr/README.md diff --git a/viya4-deployment-darksite/README.md b/viya4-deployment-darksite/README.md new file mode 100644 index 00000000..67e415c8 --- /dev/null +++ b/viya4-deployment-darksite/README.md @@ -0,0 +1,60 @@ +# Deploy to AWS EKS in Dark Site or Air-Gapped Site scenario + +This file describes procedures, helper scripts, and example files. First decide on your deployment scenario: + +1. The deployment virtual machine has Internet access but the EKS cluster cannot reach the Internet (dark site) - Follow procedures 1, 2, 4, and 6. +2. The deployment virtual machine and cluster has no Internet access (air-gapped site) - Follow procedures 1, 2, 5, and 6. Note: you'll still need to somehow push all the images and Helm charts to ECR from a machine with Internet access, and the deployment machine will use the private ECR endpoint in the VPC to pull these during install, so the deployment virtual machine won't need Internet access. + +**Notes:** +- The following procedures assume that the `viya4-iac-aws` project was used to deploy the EKS infrastructure. Refer to the `viya4-iac-aws-darksite` folder within the `viya4-iac-aws` [github repo](https://github.com/sassoftware/viya4-iac-aws) for the procedures to follow pertaining to IaC use with an AWS dark site configuration. +- Helper shell scripts under the `viya4-deployment-darksite` folder in this project assume that the deployment virtual machine is properly configured, confirm that: + - kubeconfig file for the EKS cluster has been installed and tested (EKS cluster admin access is verified as working) + - AWS CLI is configured + +# Procedures + +1. 
**Push Viya4 images to ECR (uses SAS mirrormgr tool):** + - Download deployment assets from my.sas.com + - refer to the `mirrormgr-to-ecr` folder in this repo for helper scripts + +2. **Push 3rd party images to ECR:** + - refer to the `baseline-to-ecr` folder in this repo for helper scripts + - note: OpenLDAP is only required if you are planning to use OpenLDAP for your deployment. Script to automate this is located [here](https://github.com/sassoftware/viya4-deployment/blob/main/viya4-deployment-darksite/baseline-to-ecr/openldap.sh). + +3. **(Optional) If OpenLDAP is needed, modfy local viya4-deployment clone** + - Refer to the [darksite-openldap-mod](https://github.com/sassoftware/viya4-deployment/blob/main/viya4-aws-darksite/darksite-openldap-mod) folder for procedures. You can build the container using the script or do it manually. + +4. **Deployment machine has Internet access - use viya4-deployment for baseline,install** + + 1. Use built in variables for baseline configurations in your `ansible-vars.yaml` file: + - Example `ansible-vars.yaml` provided [here](https://github.com/sassoftware/viya4-deployment/blob/main/viya4-aws-darksite/deployment-machine-assets/software/ansible-vars-iac.yaml) + - The goal here is to change the image references to point to ECR versus an Internet facing repo and add cluster subnet ID annotations for the nginx load balancers: + - Replace `{{ AWS_ACCT_ID }}` with your AWS account ID + - Replace `{{ AWS_REGION }}` with your AWS region + - Replace `{{ CONTROLLER_ECR_IMAGE_DIGEST }}` with image digest from ECR + - Replace `{{ WEBHOOK_ECR_IMAGE_DIGEST }}` with image digest from ECR + - If your VPC contains multiple subnets (unrelated to viya), you may need to add annotations to force the NLB to associate with the Viya subnets. More on that topic [here](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/deploy/subnet_discovery/). + + 2. Deploy viya4-deployment baseline,install. 
Note: the deployment virtual machine will pull the Helm charts from the Internet during this step. + +5. **Deployment machine has no Internet access - install baseline using Helm charts pulled from ECR** + - Two Options: + 1. If using OCI type repo (like ECR), we can use `viya4-deployment` but we'll need to make some changes to the baseline items in `ansible-vars.yaml`. An example provided [here](https://github.com/sassoftware/viya4-deployment/blob/main/viya4-aws-darksite/deployment-machine-assets/software/ansible-vars-iac.yaml) includes the needed variables for OCI Helm support. Pay close attention to `XXX_CHART_URL` and `XXX_CHART_NAME` variables. + 2. Use Helm directly to "manually" install baseline items. + - Refer to baseline-helm-install-ecr README.md for instructions. + +6. **viya4-deployment viya,install** + - **Note:** As of `viya4-deployment` v6.0.0, the project uses the Deployment Operator as the default. The deployment operator has additional considerations in a dark site deployment because the repository warehouse for the metadata will not be available without Internet access (as it is pulled from ses.sas.com). + + - There are multiple options to mitigate the issue created by using the Deployment operator: + + 1. (Easiest/Recommended) Set `V4_DEPLOYMENT_OPERATOR_ENABLED` to false. This uses the sas-orchestration method for deployment instead of the Deployment Operator (no requirement for offline repository-warehouse hosting is required). + + 2. Supply the repository information through an internally deployed http server. SAS doesn't provide instructions on how to do this, because there are a lot of ways to accomplish this. One way to accomplish this is shared in this [TS Track](https://sirius.na.sas.com/Sirius/GSTS/ShowTrack.aspx?trknum=7613552746). + + 3. Store required metadata on a file system that can be mounted to the reconciler pod (using a transformer). 
#!/bin/bash

# Shared settings sourced by every baseline-to-ecr helper script.

# Target AWS account ID and region for the ECR pushes.
AWS_ACCT_ID=
AWS_REGION=

# Kubernetes minor version of the EKS cluster (e.g. 25 for v1.25.x).
# This must match your deployment!
K8S_minor_version=25

# viya4-deployment ref to read chart versions from; "main" tracks the
# latest release, or pin a specific version if needed, e.g. 5.2.0.
DEPLOYMENT_VERSION=main

# Set to "sudo" if docker commands on this machine require sudo;
# otherwise leave blank.
DOCKER_SUDO=
#!/bin/bash
# Mirror the cluster-autoscaler helm chart and container image into ECR
# for a dark-site deployment. Requires: helm, yq, aws cli, docker.
# Expects 00_vars.sh to define AWS_ACCT_ID, AWS_REGION, K8S_minor_version,
# DEPLOYMENT_VERSION, and DOCKER_SUDO.

source 00_vars.sh

# Fetch the viya4-deployment defaults once (the original fetched it per branch).
DEFAULTS=$(curl -s "https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml")

# Account for v6.3.0+ changes - autoscaler now supports k8s 1.25.
# DV is the release tag with dots stripped (e.g. 6.3.0 -> 630); it is only
# compared numerically on branches where DEPLOYMENT_VERSION is not "main".
DV=$(echo "$DEPLOYMENT_VERSION" | sed 's/\.//g')
if [[ "$DEPLOYMENT_VERSION" == "main" && "$K8S_minor_version" -ge 25 ]]; then
  CHART_VERSION=$(echo "$DEFAULTS" | yq '.autoscalerVersions.PDBv1Support.api.chartVersion')
elif [[ "$DEPLOYMENT_VERSION" == "main" && "$K8S_minor_version" -le 24 ]]; then
  CHART_VERSION=$(echo "$DEFAULTS" | yq '.autoscalerVersions.PDBv1beta1Support.api.chartVersion')
elif [[ "$DV" -ge 630 && "$K8S_minor_version" -ge 25 ]]; then
  CHART_VERSION=$(echo "$DEFAULTS" | yq '.autoscalerVersions.PDBv1Support.api.chartVersion')
elif [[ "$DV" -ge 630 && "$K8S_minor_version" -le 24 ]]; then
  CHART_VERSION=$(echo "$DEFAULTS" | yq '.autoscalerVersions.PDBv1beta1Support.api.chartVersion')
elif [[ "$DV" -le 620 ]]; then
  CHART_VERSION=$(echo "$DEFAULTS" | yq '.CLUSTER_AUTOSCALER_CHART_VERSION')
fi

echo "**** cluster-autoscaler ****"
echo "Helm chart version: $CHART_VERSION"

## Get helm chart info (single values fetch instead of one per field)
helm repo add autoscaling https://kubernetes.github.io/autoscaler
helm repo update
CHART_VALUES=$(helm show values autoscaling/cluster-autoscaler --version="$CHART_VERSION")
IMG_REPO=$(echo "$CHART_VALUES" | yq '.image.repository')
TAG=$(echo "$CHART_VALUES" | yq '.image.tag')
echo "Image repo: $IMG_REPO" && echo "Image tag: $TAG"
echo "*********************"

## pull the image
# NB: $DOCKER_SUDO is intentionally unquoted so it vanishes when empty.
$DOCKER_SUDO docker pull "$IMG_REPO:$TAG"

# create ECR repo (holds the container image; the chart is pushed as OCI below)
aws ecr create-repository --no-cli-pager --repository-name cluster-autoscaler

# push the helm chart to the ECR repo
helm pull autoscaling/cluster-autoscaler --version="$CHART_VERSION"
aws ecr get-login-password \
  --region "$AWS_REGION" | helm registry login \
  --username AWS \
  --password-stdin "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com"
helm push "cluster-autoscaler-$CHART_VERSION.tgz" "oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/"
rm "cluster-autoscaler-$CHART_VERSION.tgz"

## retag the local image for ECR
$DOCKER_SUDO docker tag "$IMG_REPO:$TAG" "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/cluster-autoscaler:$TAG"

## auth local docker to ecr
aws ecr get-login-password --region "$AWS_REGION" | $DOCKER_SUDO docker login --username AWS --password-stdin "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com"

## push local image to ecr
$DOCKER_SUDO docker push "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/cluster-autoscaler:$TAG"
#!/bin/bash
# Mirror the cert-manager helm chart and its four container images
# (controller, webhook, cainjector, startupapicheck) into ECR for a
# dark-site deployment. Requires: helm, yq, aws cli, docker.

source 00_vars.sh

## get chart version from viya4-deployment repo
echo "**** cert-manager ****"
CHART_VERSION=$(curl -s "https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml" | yq '.CERT_MANAGER_CHART_VERSION')
echo "Helm chart version: $CHART_VERSION"

## Get helm chart info (fetch the values once instead of four times)
helm repo add jetstack https://charts.jetstack.io/
helm repo update
CHART_VALUES=$(helm show values jetstack/cert-manager --version="$CHART_VERSION")
IMG_CONTROLLER=$(echo "$CHART_VALUES" | yq '.image.repository')
IMG_WEBHOOK=$(echo "$CHART_VALUES" | yq '.webhook.image.repository')
IMG_CAINJECTOR=$(echo "$CHART_VALUES" | yq '.cainjector.image.repository')
IMG_STARTUP=$(echo "$CHART_VALUES" | yq '.startupapicheck.image.repository')
echo "controller repo: $IMG_CONTROLLER" && echo "webhook repo: $IMG_WEBHOOK" && echo "cainject repo: $IMG_CAINJECTOR" && echo "startupapicheck repo: $IMG_STARTUP"
echo "*********************"

## pull the images (cert-manager image tags are the chart version with a "v" prefix)
$DOCKER_SUDO docker pull "$IMG_CONTROLLER:v$CHART_VERSION"
$DOCKER_SUDO docker pull "$IMG_WEBHOOK:v$CHART_VERSION"
$DOCKER_SUDO docker pull "$IMG_CAINJECTOR:v$CHART_VERSION"
$DOCKER_SUDO docker pull "$IMG_STARTUP:v$CHART_VERSION"

# create ECR repos
# NOTE(review): the image values include the source registry host
# (e.g. quay.io/jetstack/...), so the ECR repositories are created under
# that same path — confirm this matches the image references in ansible-vars.
aws ecr create-repository --no-cli-pager --repository-name cert-manager # this repo is used to store the helm chart
aws ecr create-repository --no-cli-pager --repository-name "$IMG_CONTROLLER"
aws ecr create-repository --no-cli-pager --repository-name "$IMG_WEBHOOK"
aws ecr create-repository --no-cli-pager --repository-name "$IMG_CAINJECTOR"
aws ecr create-repository --no-cli-pager --repository-name "$IMG_STARTUP"

# push the helm chart to the ECR repo
helm pull jetstack/cert-manager --version="$CHART_VERSION"
aws ecr get-login-password \
  --region "$AWS_REGION" | helm registry login \
  --username AWS \
  --password-stdin "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com"
helm push "cert-manager-v$CHART_VERSION.tgz" "oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/"
rm "cert-manager-v$CHART_VERSION.tgz"

## retag the local images for ECR
$DOCKER_SUDO docker tag "$IMG_CONTROLLER:v$CHART_VERSION" "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_CONTROLLER:v$CHART_VERSION"
$DOCKER_SUDO docker tag "$IMG_WEBHOOK:v$CHART_VERSION" "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_WEBHOOK:v$CHART_VERSION"
$DOCKER_SUDO docker tag "$IMG_CAINJECTOR:v$CHART_VERSION" "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_CAINJECTOR:v$CHART_VERSION"
$DOCKER_SUDO docker tag "$IMG_STARTUP:v$CHART_VERSION" "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_STARTUP:v$CHART_VERSION"

## auth local docker to ecr
aws ecr get-login-password --region "$AWS_REGION" | $DOCKER_SUDO docker login --username AWS --password-stdin "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com"

## push local images to ecr
$DOCKER_SUDO docker push "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_CONTROLLER:v$CHART_VERSION"
$DOCKER_SUDO docker push "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_WEBHOOK:v$CHART_VERSION"
$DOCKER_SUDO docker push "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_CAINJECTOR:v$CHART_VERSION"
$DOCKER_SUDO docker push "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_STARTUP:v$CHART_VERSION"
#!/bin/bash
# Mirror the aws-ebs-csi-driver helm chart, the driver image, and all six
# sidecar images into ECR for a dark-site deployment.
# Requires: helm, yq, aws cli, docker.

source 00_vars.sh

## get chart version from viya4-deployment repo
echo -e "\n**** aws-ebs-csi-driver ****"
CHART_VERSION=$(curl -s "https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml" | yq '.EBS_CSI_DRIVER_CHART_VERSION')
echo "Helm chart version: $CHART_VERSION"

## Get helm chart info (show all = Chart.yaml + values; queried field by field below)
helm repo add aws-ebs-csi-driver https://kubernetes-sigs.github.io/aws-ebs-csi-driver
helm repo update
HELM_CHART=$(helm show all aws-ebs-csi-driver/aws-ebs-csi-driver --version="$CHART_VERSION")
IMG_REPO=$(echo "$HELM_CHART" | yq -N '.image.repository | select(. != null)')
IMG_TAG=$(echo "$HELM_CHART" | yq -N '.appVersion | select(. != null)')  # driver image tag tracks appVersion
PROVISIONER_REPO=$(echo "$HELM_CHART" | yq -N '.sidecars.provisioner.image.repository | select(. != null)')
PROVISIONER_TAG=$(echo "$HELM_CHART" | yq -N '.sidecars.provisioner.image.tag | select(. != null)')
ATTACHER_REPO=$(echo "$HELM_CHART" | yq -N '.sidecars.attacher.image.repository | select(. != null)')
ATTACHER_TAG=$(echo "$HELM_CHART" | yq -N '.sidecars.attacher.image.tag | select(. != null)')
SNAPSHOTTER_REPO=$(echo "$HELM_CHART" | yq -N '.sidecars.snapshotter.image.repository | select(. != null)')
SNAPSHOTTER_TAG=$(echo "$HELM_CHART" | yq -N '.sidecars.snapshotter.image.tag | select(. != null)')
LIVENESS_REPO=$(echo "$HELM_CHART" | yq -N '.sidecars.livenessProbe.image.repository | select(. != null)')
LIVENESS_TAG=$(echo "$HELM_CHART" | yq -N '.sidecars.livenessProbe.image.tag | select(. != null)')
RESIZER_REPO=$(echo "$HELM_CHART" | yq -N '.sidecars.resizer.image.repository | select(. != null)')
RESIZER_TAG=$(echo "$HELM_CHART" | yq -N '.sidecars.resizer.image.tag | select(. != null)')
NODEREG_REPO=$(echo "$HELM_CHART" | yq -N '.sidecars.nodeDriverRegistrar.image.repository | select(. != null)')
NODEREG_TAG=$(echo "$HELM_CHART" | yq -N '.sidecars.nodeDriverRegistrar.image.tag | select(. != null)')
echo "Driver image repo: $IMG_REPO" && echo "Image tag: v$IMG_TAG"
echo "Provisioning image repo: $PROVISIONER_REPO" && echo "Image tag: $PROVISIONER_TAG"
echo "Attacher image repo: $ATTACHER_REPO" && echo "Image tag: $ATTACHER_TAG"
echo "Snapshotter image repo: $SNAPSHOTTER_REPO" && echo "Image tag: $SNAPSHOTTER_TAG"
echo "Liveness image repo: $LIVENESS_REPO" && echo "Image tag: $LIVENESS_TAG"
echo "Resizer image repo: $RESIZER_REPO" && echo "Image tag: $RESIZER_TAG"
# fix: original echoed undefined $NODEREG_REP (always empty)
echo "NodeDriverRegister image repo: $NODEREG_REPO" && echo "Image tag: $NODEREG_TAG"
echo "*********************"

## pull the images
$DOCKER_SUDO docker pull "$IMG_REPO:v$IMG_TAG"
$DOCKER_SUDO docker pull "$PROVISIONER_REPO:$PROVISIONER_TAG"
$DOCKER_SUDO docker pull "$ATTACHER_REPO:$ATTACHER_TAG"
$DOCKER_SUDO docker pull "$SNAPSHOTTER_REPO:$SNAPSHOTTER_TAG"
$DOCKER_SUDO docker pull "$LIVENESS_REPO:$LIVENESS_TAG"
$DOCKER_SUDO docker pull "$RESIZER_REPO:$RESIZER_TAG"
$DOCKER_SUDO docker pull "$NODEREG_REPO:$NODEREG_TAG"

# create ECR repos
aws ecr create-repository --no-cli-pager --repository-name aws-ebs-csi-driver # this is to house the helm chart
aws ecr create-repository --no-cli-pager --repository-name "$IMG_REPO"
aws ecr create-repository --no-cli-pager --repository-name "$PROVISIONER_REPO"
aws ecr create-repository --no-cli-pager --repository-name "$ATTACHER_REPO"
aws ecr create-repository --no-cli-pager --repository-name "$SNAPSHOTTER_REPO"
aws ecr create-repository --no-cli-pager --repository-name "$LIVENESS_REPO"
aws ecr create-repository --no-cli-pager --repository-name "$RESIZER_REPO"
aws ecr create-repository --no-cli-pager --repository-name "$NODEREG_REPO"

# push the helm chart to the ECR repo
helm pull aws-ebs-csi-driver/aws-ebs-csi-driver --version="$CHART_VERSION"
aws ecr get-login-password \
  --no-cli-pager \
  --region "$AWS_REGION" | helm registry login \
  --username AWS \
  --password-stdin "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com"
helm push "aws-ebs-csi-driver-$CHART_VERSION.tgz" "oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/"
rm "aws-ebs-csi-driver-$CHART_VERSION.tgz"

# retag the local images for ECR
$DOCKER_SUDO docker tag "$IMG_REPO:v$IMG_TAG" "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_REPO:v$IMG_TAG"
$DOCKER_SUDO docker tag "$PROVISIONER_REPO:$PROVISIONER_TAG" "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$PROVISIONER_REPO:$PROVISIONER_TAG"
$DOCKER_SUDO docker tag "$ATTACHER_REPO:$ATTACHER_TAG" "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$ATTACHER_REPO:$ATTACHER_TAG"
$DOCKER_SUDO docker tag "$SNAPSHOTTER_REPO:$SNAPSHOTTER_TAG" "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$SNAPSHOTTER_REPO:$SNAPSHOTTER_TAG"
$DOCKER_SUDO docker tag "$LIVENESS_REPO:$LIVENESS_TAG" "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$LIVENESS_REPO:$LIVENESS_TAG"
$DOCKER_SUDO docker tag "$RESIZER_REPO:$RESIZER_TAG" "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$RESIZER_REPO:$RESIZER_TAG"
$DOCKER_SUDO docker tag "$NODEREG_REPO:$NODEREG_TAG" "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$NODEREG_REPO:$NODEREG_TAG"

# auth local docker to ecr
aws ecr get-login-password --region "$AWS_REGION" | $DOCKER_SUDO docker login --username AWS --password-stdin "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com"

# push local images to ecr
$DOCKER_SUDO docker push "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_REPO:v$IMG_TAG"
$DOCKER_SUDO docker push "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$PROVISIONER_REPO:$PROVISIONER_TAG"
$DOCKER_SUDO docker push "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$ATTACHER_REPO:$ATTACHER_TAG"
$DOCKER_SUDO docker push "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$SNAPSHOTTER_REPO:$SNAPSHOTTER_TAG"
$DOCKER_SUDO docker push "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$LIVENESS_REPO:$LIVENESS_TAG"
$DOCKER_SUDO docker push "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$RESIZER_REPO:$RESIZER_TAG"
$DOCKER_SUDO docker push "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$NODEREG_REPO:$NODEREG_TAG"
#!/bin/bash
# Mirror the ingress-nginx helm chart, controller image, and admission
# webhook image into ECR for a dark-site deployment.
# Requires: helm, yq, aws cli, docker.

source 00_vars.sh

# Fetch the viya4-deployment defaults once (the original fetched it up to 4x).
DEFAULTS=$(curl -s "https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml")

# Determine chart version to use.
# NOTE(review): upstream keys expose a version "floor" and "ceiling" with a
# chart version each; branch order below mirrors the original — confirm it
# matches the selection logic in viya4-deployment's baseline role.
V_CEILING=$(echo "$DEFAULTS" | yq '.ingressVersions.k8sMinorVersionCeiling.value')
V_FLOOR=$(echo "$DEFAULTS" | yq '.ingressVersions.k8sMinorVersionFloor.value')

if [[ "$K8S_minor_version" -ge "$V_FLOOR" ]]; then
  CHART_VERSION=$(echo "$DEFAULTS" | yq '.ingressVersions.k8sMinorVersionFloor.api.chartVersion')
  echo "Helm chart version: $CHART_VERSION"
elif [[ "$K8S_minor_version" -le "$V_CEILING" ]]; then
  CHART_VERSION=$(echo "$DEFAULTS" | yq '.ingressVersions.k8sMinorVersionCeiling.api.chartVersion')
  echo "Helm chart version: $CHART_VERSION"
else
  echo "Error with your minor version! Exiting..."
  exit 1
fi

## Get helm chart info (single values fetch instead of one per field)
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update
CHART_VALUES=$(helm show values ingress-nginx/ingress-nginx --version="$CHART_VERSION")
CONTROLLER_REGISTRY=$(echo "$CHART_VALUES" | yq '.controller.image.registry')
CONTROLLER_IMAGE=$(echo "$CHART_VALUES" | yq '.controller.image.image')
CONTROLLER_TAG=$(echo "$CHART_VALUES" | yq '.controller.image.tag')
WEBHOOKS_REGISTRY=$(echo "$CHART_VALUES" | yq '.controller.admissionWebhooks.patch.image.registry')
WEBHOOKS_TAG=$(echo "$CHART_VALUES" | yq '.controller.admissionWebhooks.patch.image.tag')
WEBHOOKS_IMAGE=$(echo "$CHART_VALUES" | yq '.controller.admissionWebhooks.patch.image.image')
echo "controller repo: $CONTROLLER_REGISTRY/$CONTROLLER_IMAGE:$CONTROLLER_TAG" && echo "webhook repo: $WEBHOOKS_REGISTRY/$WEBHOOKS_IMAGE:$WEBHOOKS_TAG"
echo "*********************"

## pull the images
$DOCKER_SUDO docker pull "$CONTROLLER_REGISTRY/$CONTROLLER_IMAGE:$CONTROLLER_TAG"
$DOCKER_SUDO docker pull "$WEBHOOKS_REGISTRY/$WEBHOOKS_IMAGE:$WEBHOOKS_TAG"

# create ECR repos
aws ecr create-repository --no-cli-pager --repository-name ingress-nginx # this repo is used to store the helm chart
aws ecr create-repository --no-cli-pager --repository-name "$CONTROLLER_IMAGE"
aws ecr create-repository --no-cli-pager --repository-name "$WEBHOOKS_IMAGE"

# push the helm chart to the ECR repo
helm pull ingress-nginx/ingress-nginx --version="$CHART_VERSION"
aws ecr get-login-password \
  --region "$AWS_REGION" | helm registry login \
  --username AWS \
  --password-stdin "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com"
helm push "ingress-nginx-$CHART_VERSION.tgz" "oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/"
rm "ingress-nginx-$CHART_VERSION.tgz"

## retag the local images for ECR
$DOCKER_SUDO docker tag "$CONTROLLER_REGISTRY/$CONTROLLER_IMAGE:$CONTROLLER_TAG" "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$CONTROLLER_IMAGE:$CONTROLLER_TAG"
$DOCKER_SUDO docker tag "$WEBHOOKS_REGISTRY/$WEBHOOKS_IMAGE:$WEBHOOKS_TAG" "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$WEBHOOKS_IMAGE:$WEBHOOKS_TAG"

## auth local docker to ecr
aws ecr get-login-password --region "$AWS_REGION" | $DOCKER_SUDO docker login --username AWS --password-stdin "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com"

## push local images to ecr
$DOCKER_SUDO docker push "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$CONTROLLER_IMAGE:$CONTROLLER_TAG"
$DOCKER_SUDO docker push "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$WEBHOOKS_IMAGE:$WEBHOOKS_TAG"
#!/bin/bash
# Mirror the bitnami metrics-server helm chart and container image into ECR
# for a dark-site deployment. Requires: helm, yq, aws cli, docker.

source 00_vars.sh

echo "**** metrics-server ****"
CHART_VERSION=$(curl -s "https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml" | yq '.METRICS_SERVER_CHART_VERSION')
echo "Helm chart version: $CHART_VERSION"

helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
# Fetch the chart values once (the original fetched them three times).
CHART_VALUES=$(helm show values bitnami/metrics-server --version="$CHART_VERSION")
REGISTRY=$(echo "$CHART_VALUES" | yq '.image.registry')
TAG=$(echo "$CHART_VALUES" | yq '.image.tag')
IMAGE=$(echo "$CHART_VALUES" | yq '.image.repository')
echo "Image repo: $REGISTRY/$IMAGE:$TAG"
echo "*********************"

## pull the image
$DOCKER_SUDO docker pull "$REGISTRY/$IMAGE:$TAG"

# create ECR repo (holds the container image; the chart is pushed as OCI below)
aws ecr create-repository --no-cli-pager --repository-name metrics-server

# push the helm chart to the ECR repo
helm pull bitnami/metrics-server --version="$CHART_VERSION"
aws ecr get-login-password \
  --region "$AWS_REGION" | helm registry login \
  --username AWS \
  --password-stdin "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com"
helm push "metrics-server-$CHART_VERSION.tgz" "oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/"
rm "metrics-server-$CHART_VERSION.tgz"

## retag the local image for ECR
$DOCKER_SUDO docker tag "$REGISTRY/$IMAGE:$TAG" "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/metrics-server:$TAG"

## auth local docker to ecr
aws ecr get-login-password --region "$AWS_REGION" | $DOCKER_SUDO docker login --username AWS --password-stdin "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com"

## push local image to ecr
$DOCKER_SUDO docker push "$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/metrics-server:$TAG"
$REPOSITORY:$TAG" +echo "*****************************************" + +## pull the image +$DOCKER_SUDO docker pull $REPOSITORY:$TAG + + +# create ECR repo +aws ecr create-repository --no-cli-pager --repository-name nfs-subdir-external-provisioner + +# push the helm chart to the ECR repo +helm pull nfs-subdir-external-provisioner/nfs-subdir-external-provisioner --version=$CHART_VERSION +aws ecr get-login-password \ + --region $AWS_REGION | helm registry login \ + --username AWS \ + --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com +helm push nfs-subdir-external-provisioner-$CHART_VERSION.tgz oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/ +rm nfs-subdir-external-provisioner-$CHART_VERSION.tgz + + +# ## update local image tag appropriately +$DOCKER_SUDO docker tag $REPOSITORY:$TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/nfs-subdir-external-provisioner:$TAG + + +# # ## auth local $DOCKER_SUDO docker to ecr +aws ecr get-login-password --region $AWS_REGION | $DOCKER_SUDO docker login --username AWS --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com + +# # ## puch local image to ecr +$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/nfs-subdir-external-provisioner:$TAG \ No newline at end of file diff --git a/viya4-deployment-darksite/baseline-to-ecr/openldap.sh b/viya4-deployment-darksite/baseline-to-ecr/openldap.sh new file mode 100644 index 00000000..88a86c2a --- /dev/null +++ b/viya4-deployment-darksite/baseline-to-ecr/openldap.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +source 00_vars.sh + +echo "**** openldap ****" +IMAGE=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/vdm/templates/resources/openldap.yaml | yq -N '.spec.template.spec.containers[0].image | select(. 
!= null)') +echo "Image: $IMAGE" +echo "******************" + +## pull the image +$DOCKER_SUDO docker pull $IMAGE + + +# create ECR repo +aws ecr create-repository --no-cli-pager --repository-name osixia/openldap + + +# ## update local image tag appropriately +$DOCKER_SUDO docker tag $IMAGE $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMAGE + + +# # ## auth local docker to ecr +aws ecr get-login-password --region $AWS_REGION | docker login --username AWS --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com + +# # ## puch local image to ecr +$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMAGE \ No newline at end of file diff --git a/viya4-deployment-darksite/darksite-openldap-mod/README.md b/viya4-deployment-darksite/darksite-openldap-mod/README.md new file mode 100644 index 00000000..8912dab2 --- /dev/null +++ b/viya4-deployment-darksite/darksite-openldap-mod/README.md @@ -0,0 +1,6 @@ + +## Mod roles/vdm/templates/resources/openldap.yaml + +- Only required if using an internal OpenLDAP server. By default, the cluster will reach out to docker hub to pull this image, and in a darksite this isn't possible. +- Run the darksite-openldap-mod.sh script. +- Build the modded container using the script or manually if you'd like. \ No newline at end of file diff --git a/viya4-deployment-darksite/darksite-openldap-mod/darksite-openldap-mod.sh b/viya4-deployment-darksite/darksite-openldap-mod/darksite-openldap-mod.sh new file mode 100644 index 00000000..f10c9de2 --- /dev/null +++ b/viya4-deployment-darksite/darksite-openldap-mod/darksite-openldap-mod.sh @@ -0,0 +1,186 @@ +#!/bin/bash + +# helper script to easily mod viya4-deployment when using openldap in a darksite + + +## check that viya4-deployment/ exists in this folder +if [ ! -d "viya4-deployment/" ] +then + echo -e "\nError: Directory viya4-deployment/ does not exists!\n" + read -p "Would you like to locally clone the viya4-deployment github repo to fix (y/n)? 
" -n 1 -r REPLY + echo # (optional) move to a new line + if [[ ! $REPLY =~ ^[Yy]$ ]]; then + exit 1 + fi + ## Get desired DAC version + read -p "What release version of DAC do you want to use? " -r IAC_VERSION + git clone --branch $IAC_VERSION https://github.com/sassoftware/viya4-deployment.git +fi + +echo +read -p "What is your aws account id? " -r AWS_ACCT_ID +read -p "What is your aws region? " -r AWS_REGION + +echo -e "\n+++Modding viya4-deployment/roles/vdm/templates/resources/openldap.yaml ..." + +tee viya4-deployment/roles/vdm/templates/resources/openldap.yaml > /dev/null << EOF +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: openldap +spec: + replicas: 1 + selector: + matchLabels: + app: openldap + template: + metadata: + labels: + app: openldap + spec: + hostname: ldap-svc + imagePullSecrets: [] + containers: + - image: "${AWS_ACCT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/osixia/openldap:1.3.0" + imagePullPolicy: IfNotPresent + name: openldap + ports: + - containerPort: 389 + args: + - --copy-service + env: + - name: LDAP_TLS + valueFrom: + configMapKeyRef: + name: openldap-bootstrap-config + key: LDAP_TLS + - name: LDAP_ADMIN_PASSWORD + valueFrom: + configMapKeyRef: + name: openldap-bootstrap-config + key: LDAP_ADMIN_PASSWORD + - name: LDAP_DOMAIN + valueFrom: + configMapKeyRef: + name: openldap-bootstrap-config + key: LDAP_DOMAIN + - name: LDAP_REMOVE_CONFIG_AFTER_SETUP + valueFrom: + configMapKeyRef: + name: openldap-bootstrap-config + key: LDAP_REMOVE_CONFIG_AFTER_SETUP + - name: DISABLE_CHOWN + valueFrom: + configMapKeyRef: + name: openldap-bootstrap-config + key: DISABLE_CHOWN + volumeMounts: + - name: bootstrap-custom + mountPath: "/container/service/slapd/assets/config/bootstrap/ldif/custom" + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: workload.sas.com/class + operator: In + values: + - stateless + matchFields: [] + weight: 100 + - preference: + 
matchExpressions: + - key: workload.sas.com/class + operator: NotIn + values: + - compute + - cas + - stateful + - connect + matchFields: [] + weight: 50 + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.azure.com/mode + operator: NotIn + values: + - system + matchFields: [] + tolerations: + - effect: NoSchedule + key: workload.sas.com/class + operator: Equal + value: stateful + - effect: NoSchedule + key: workload.sas.com/class + operator: Equal + value: stateless + volumes: + - name: bootstrap-custom + emptyDir: {} + - name: ldap-bootstrap-config + configMap: + name: openldap-bootstrap-config + items: + - key: LDAP_USERS_CONF + path: 07-testUsers.ldif + mode: 0664 + - key: LDAP_GROUPS_CONF + path: 06-testGroups.ldif + mode: 0664 + initContainers: + - name: ldap-init + image: "${AWS_ACCT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/osixia/openldap:1.3.0" + command: + - bash + - -c + - "cp -avRL /tmp/ldif/custom/* /container/service/slapd/assets/config/bootstrap/ldif/custom/" + volumeMounts: + - name: bootstrap-custom + mountPath: "/container/service/slapd/assets/config/bootstrap/ldif/custom" + - name: ldap-bootstrap-config + mountPath: "/tmp/ldif/custom" +--- +apiVersion: v1 +kind: Service +metadata: + name: ldap-svc +spec: + ports: + - port: 389 + protocol: TCP + targetPort: 389 + name: ldap + selector: + app: openldap +EOF + +echo -e "\n+++Mod complete!" + +# build modded viya4-deployment docker container? +echo +read -p "Would you like to build the modded viya4-deployment docker container (y/n)? " -n 1 -r REPLY +echo # (optional) move to a new line +if [[ $REPLY =~ ^[Yy]$ ]]; then + read -p " What tag would you like to use for the modded container? 
" -r TAG + docker build -t viya4-deployment:$TAG viya4-deployment/ + echo -e "\n+++Modded docker container is: viya4-deployment:${TAG}" +fi + +# push modded docker container to ECR +echo +read -p "Would you like to push the viya4-deployment:${TAG} docker container to ECR (y/n)? " -n 1 -r REPLY +echo # (optional) move to a new line +if [[ ! $REPLY =~ ^[Yy]$ ]]; then + exit 1 +fi + +aws ecr create-repository --no-cli-pager --repository-name viya4-deployment + +docker tag viya4-deployment:$TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/viya4-deployment:$TAG + +aws ecr get-login-password --no-cli-pager --region $AWS_REGION | $DOCKER_SUDO docker login --username AWS --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com + +docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/viya4-deployment:$TAG \ No newline at end of file diff --git a/viya4-deployment-darksite/deployment-machine-assets/01_iac_deploy.sh b/viya4-deployment-darksite/deployment-machine-assets/01_iac_deploy.sh new file mode 100755 index 00000000..142550ff --- /dev/null +++ b/viya4-deployment-darksite/deployment-machine-assets/01_iac_deploy.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +# what is the tag +read -p "What is the tag for your viya4-iac-aws container? " -r TAG +# what is the job +read -p "What type of IaC job: plan, apply, or destroy? 
" -r REPLY + +# preview job +if [ $REPLY == "plan" ]; then + echo -e "\n+++Starting plan job ...\n" + docker run --rm \ + --group-add root \ + --user "$(id -u):$(id -g)" \ + --volume=$(pwd)/infrastructure:/workspace \ + viya4-iac-aws:$TAG \ + plan -var-file=/workspace/terraform.tfvars \ + -state=/workspace/terraform.tfstate +fi + +# apply job +if [ $REPLY == "apply" ]; then + echo -e "\n+++Starting apply job ...\n" + docker run --rm \ + --group-add root \ + --user "$(id -u):$(id -g)" \ + --volume=$(pwd)/infrastructure:/workspace \ + viya4-iac-aws:$TAG \ + apply -auto-approve -var-file=/workspace/terraform.tfvars \ + -state=/workspace/terraform.tfstate + + # Update the kubeconfig using aws cli and place here on deploy machine: ~/.kube/config + aws eks update-kubeconfig --name darksite-lab-eks + rm /home/$USER/viya/infrastructure/darksite-lab-eks-kubeconfig.conf +fi + +# destroy job +if [ $REPLY == "destroy" ]; then + read -p "Are you sure you want to continue (y/n)? " -n 1 -r REPLY + echo # (optional) move to a new line + if [[ ! $REPLY =~ ^[Yy]$ ]]; then + exit 1 + fi + echo -e "\n+++Starting destroy job ...\n" + docker run --rm \ + --group-add root \ + --user "$(id -u):$(id -g)" \ + --volume=$(pwd)/infrastructure:/workspace \ + viya4-iac-aws:$TAG \ + destroy -auto-approve -var-file=/workspace/terraform.tfvars \ + -state=/workspace/terraform.tfstate +fi \ No newline at end of file diff --git a/viya4-deployment-darksite/deployment-machine-assets/02_dac_deploy.sh b/viya4-deployment-darksite/deployment-machine-assets/02_dac_deploy.sh new file mode 100755 index 00000000..01fff311 --- /dev/null +++ b/viya4-deployment-darksite/deployment-machine-assets/02_dac_deploy.sh @@ -0,0 +1,120 @@ +#!/bin/bash + +# get viya4-deployment container tag +echo -e "\n" +read -p "What is your viya4-deployment container tag? 
" -r DOCKER_TAG + +TASKS=("baseline" "viya" "cluster-logging" "cluster-monitoring" "viya-monitoring" "install" "uninstall") + +##### FUNCTIONS ##### +function docker_run() { + echo "starting $tags job..." + docker run --rm \ + --group-add root \ + --user $(id -u):$(id -g) \ + --volume $(pwd)/infrastructure/ssh/id_rsa:/config/jump_svr_private_key \ + --volume $(pwd)/infrastructure/terraform.tfstate:/config/tfstate \ + --volume /home/ec2-user/.kube/config:/.kube/config \ + --volume $(pwd)/software/deployments:/data \ + --volume $(pwd)/software/viya_order_assets:/viya_order_assets \ + --volume $(pwd)/software/ansible-vars-iac.yaml:/config/config \ + --volume $(pwd)/software/ingress:/ingress \ + --volume $(pwd)/software/sitedefault.yaml:/sitedefault/sitedefault.yaml \ + viya4-deployment:$DOCKER_TAG --tags "$tags" +} + +function join_by { + local d=${1-} f=${2-} + if shift 2; then + printf %s "$f" "${@/#/$d}" + fi +} + +##### MAIN SCRIPT ##### +if [ $# -eq 0 ] +then + # what are the deploy tags + echo + echo "You didn't provide deployment tags!" + echo + echo "Tasks: baseline viya cluster-logging cluster-monitoring viya-monitoring" + echo "Actions: install uninstall" + echo + echo ' -All tasks and actions must be separated by "," ' + echo " -At least one task must be supplied. Multiple tasks are allowed. " + echo " -An action is required and must be the last and ONLY action provided." + echo + echo "Examples: baseline,viya,install" + echo " viya,uninstall " + echo + echo -n "What are your deployment tags? " + read -r REPLY +else + REPLY=$* +fi + +# split REPLY into an array +IFS=',' read -r -a array <<< "$REPLY" +# remove spaces in array elements +clean=() +for i in "${array[@]}"; do + i=${i// /} + clean+=("$i") +done + +# check if provided tasks are valid +for i in "${clean[@]}"; do + inarray=$(echo ${TASKS[@]} | grep -ow "$i" | wc -w) + if [ $inarray == 0 ]; then + echo $i "is not a valid input." 
+ exit 0 + fi +done + +# check that more than one tag is provided +len=${#clean[@]} +if [ $len -lt 2 ]; then + echo "Not enough tags provided!" + exit 0 +fi + +# check if install and uninstall is provided correctly +count=0 +for i in "${clean[@]}"; do + if [ $i == "install" ] || [ $i == "uninstall" ]; then + (( count++ )) + fi +done +if [ $count == 0 ]; then + echo "You didn't provide an install or uninstall action!" + exit 0 +elif [ $count -gt 1 ]; then + echo "You can only have one action: install or uninstall!" + exit 0 +fi +# check that install/uninstall is last value +last="${clean[-1]}" +if [ "$last" != "install" ] && [ "$last" != "uninstall" ]; then + echo "install or uninstall must be last tag value!" + exit 0 +fi + +# if uninstall job, double check before continuing! +if [ "$last" == "uninstall" ]; then + read -p "Are you really sure you want to continue; this action is destructive!! (y/n)? " -n 1 -r REPLY + echo # (optional) move to a new line + if [[ ! $REPLY =~ ^[Yy]$ ]]; then + exit 1 + fi +fi + +# all checks passed so build the tags string +tags=$(join_by , ${clean[*]}) + +# run the function +docker_run + +# remove downloaded assets +if [ -f software/deployments/darksite-lab-eks/viya/SASViyaV4*.tgz ]; then + rm software/deployments/darksite-lab-eks/viya/SASViyaV4*.tgz +fi diff --git a/viya4-deployment-darksite/deployment-machine-assets/infrastructure/terraform.tfvars b/viya4-deployment-darksite/deployment-machine-assets/infrastructure/terraform.tfvars new file mode 100755 index 00000000..856f683a --- /dev/null +++ b/viya4-deployment-darksite/deployment-machine-assets/infrastructure/terraform.tfvars @@ -0,0 +1,111 @@ +# !NOTE! - These are only a subset of the variables in CONFIG-VARS.md provided +# as examples. Customize this file to add any variables from CONFIG-VARS.md whose +# default values you want to change. 
+ +# **************** REQUIRED VARIABLES **************** +# These required variables' values MUST be provided by the User +prefix = "darksite-lab" +location = "" # e.g., "us-east-1" +# **************** REQUIRED VARIABLES **************** + +# Bring your own existing resources - get values from AWS console or VPC/Subnet provisioning script outputs +vpc_id = "PrivateVPCId" +subnet_ids = { # only needed if using pre-existing subnets + "public" : ["PrivateSubnetAId", "PrivateSubnetBId"], + "private" : ["PrivateSubnetAId", "PrivateSubnetBId"], + "control_plane" : ["ControlPlaneSubnetAId", "ControlPlaneSubnetBId"], + "database" : ["PrivateSubnetAId", "PrivateSubnetBId"] # only when 'create_postgres=true' +} + +security_group_id = "PrivateVpcSGId" +cluster_security_group_id = "PrivateClusterControlSGId" +workers_security_group_id = "PrivateClusterWorkersSGId" + +# !NOTE! - Without specifying your CIDR block access rules, ingress traffic +# to your cluster will be blocked by default. + +# ************** RECOMMENDED VARIABLES *************** +default_public_access_cidrs = [] # e.g., ["123.45.6.89/32"] # not required in a darksite +ssh_public_key = "/workspace/ssh/id_rsa.pub" # container path to ssh public key used for jumpserver +# ************** RECOMMENDED VARIABLES *************** + +# Tags for all tagable items in your cluster. +tags = { } # e.g., { "key1" = "value1", "key2" = "value2" } + +# Postgres config - By having this entry a database server is created. If you do not +# need an external database server remove the 'postgres_servers' +# block below. 
+# postgres_servers = { +# default = {}, +# } + +## Cluster config +cluster_api_mode = "private" +kubernetes_version = "1.26" +default_nodepool_node_count = 1 +default_nodepool_vm_type = "m5.2xlarge" + +## General +storage_type = "standard" +nfs_raid_disk_type = "gp3" +nfs_raid_disk_iops = "3000" + +## Cluster Node Pools config +node_pools = { + cas = { + "vm_type" = "m5.2xlarge" + "cpu_type" = "AL2_x86_64" + "os_disk_type" = "gp3" + "os_disk_size" = 200 + "os_disk_iops" = 3000 + "min_nodes" = 1 + "max_nodes" = 5 + "node_taints" = ["workload.sas.com/class=cas:NoSchedule"] + "node_labels" = { + "workload.sas.com/class" = "cas" + } + "custom_data" = "" + "metadata_http_endpoint" = "enabled" + "metadata_http_tokens" = "required" + "metadata_http_put_response_hop_limit" = 1 + }, + compute = { + "vm_type" = "m5.8xlarge" + "cpu_type" = "AL2_x86_64" + "os_disk_type" = "gp3" + "os_disk_size" = 200 + "os_disk_iops" = 3000 + "min_nodes" = 1 + "max_nodes" = 5 + "node_taints" = ["workload.sas.com/class=compute:NoSchedule"] + "node_labels" = { + "workload.sas.com/class" = "compute" + "launcher.sas.com/prepullImage" = "sas-programming-environment" + } + "custom_data" = "" + "metadata_http_endpoint" = "enabled" + "metadata_http_tokens" = "required" + "metadata_http_put_response_hop_limit" = 1 + }, + services = { + "vm_type" = "m5.4xlarge" + "cpu_type" = "AL2_x86_64" + "os_disk_type" = "gp3" + "os_disk_size" = 200 + "os_disk_iops" = 3000 + "min_nodes" = 0 + "max_nodes" = 5 + "node_taints" = ["workload.sas.com/class=stateful:NoSchedule"] + "node_labels" = { + "workload.sas.com/class" = "stateful" + } + "custom_data" = "" + "metadata_http_endpoint" = "enabled" + "metadata_http_tokens" = "required" + "metadata_http_put_response_hop_limit" = 1 + } +} + +# Jump Server +create_jump_vm = true +create_jump_public_ip = false diff --git a/viya4-deployment-darksite/deployment-machine-assets/software/ansible-vars-iac.yaml 
b/viya4-deployment-darksite/deployment-machine-assets/software/ansible-vars-iac.yaml new file mode 100755 index 00000000..ee7cc7d0 --- /dev/null +++ b/viya4-deployment-darksite/deployment-machine-assets/software/ansible-vars-iac.yaml @@ -0,0 +1,200 @@ +## Cluster +NAMESPACE: viya + +## MISC +DEPLOY: true # Set to false to stop at generating the manifest +LOADBALANCER_SOURCE_RANGES: ['192.168.8.0/24'] +KUBECONFIG: /.kube/config +V4_DEPLOYMENT_OPERATOR_ENABLED: false # sas-orchestration does not phone home for entitlements (set to false for darksite) + +## Storage +V4_CFG_MANAGE_STORAGE: true +#V4_CFG_RWX_FILESTORE_PATH: "/" # NOTE: EFS is "/" but NFS is "/export" (for NFS) + +## SAS Software Order +V4_CFG_ORDER_NUMBER: # order number +V4_CFG_CADENCE_NAME: # stable or lts +V4_CFG_CADENCE_VERSION: # cadence version +## Providing the following three variables will bypass DAC using SAS Viya API (DAC 6.2.0+): +V4_CFG_DEPLOYMENT_ASSETS: /viya_order_assets/ # container path to deployment assets +V4_CFG_LICENSE: /viya_order_assets/ # container path to license file (.jwt) +V4_CFG_CERTS: /viya_order_assets/ # container path to viya certs + +## Path to sitedefault.yaml +V4_CFG_SITEDEFAULT: /sitedefault/sitedefault.yaml # container path to sitedefault.yaml + +## CR Access +V4_CFG_CR_URL: "{{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/viya" # update this for your account and region + +## Ingress +V4_CFG_INGRESS_TYPE: ingress +V4_CFG_INGRESS_MODE: "private" +# if no FQDN dns registration, use the DNS of the private NLB, here is a way to get that automatically: +# V4_CFG_INGRESS_FQDN: $(kubectl get service ingress-nginx-controller -n ingress-nginx -o jsonpath={'.status.loadBalancer.ingress[0].ip'}) +V4_CFG_INGRESS_FQDN: +V4_CFG_TLS_MODE: "full-stack" # [full-stack|front-door|ingress-only|disabled] + +## Postgres +V4_CFG_POSTGRES_SERVERS: + default: + internal: true + postgres_pvc_storage_size: 10Gi + postgres_pvc_access_mode: ReadWriteOnce + postgres_storage_class: sas 
+ backrest_storage_class: sas + +## LDAP +V4_CFG_EMBEDDED_LDAP_ENABLE: true # Note: will require the DaC tool (openldap deployment) to be modded to point to ECR for openldap container image + +## Baseline configs are specifically for repos that use OCI for helm charts (like ECR) + +## Cert-manager config +CERT_MANAGER_CHART_URL: "" # yes we want this blank because of how the ansible helm module expects OCI to be passed +CERT_MANAGER_CHART_NAME: oci://{{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/cert-manager +CERT_MANAGER_CONFIG: + image: + repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/quay.io/jetstack/cert-manager-controller + webhook: + image: + repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/quay.io/jetstack/cert-manager-webhook + cainjector: + image: + repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/quay.io/jetstack/cert-manager-cainjector + startupapicheck: + image: + repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/quay.io/jetstack/cert-manager-ctl + installCRDs: "true" + extraArgs: + - --enable-certificate-owner-ref=true + +## Metrics-server config +METRICS_SERVER_CHART_URL: "" # yes we want this blank because of how the ansible helm module expects OCI to be passed +METRICS_SERVER_CHART_NAME: oci://{{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/metrics-server +METRICS_SERVER_CONFIG: + image: + registry: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com + repository: metrics-server + apiService: + create: true + +## NGINX config +INGRESS_NGINX_CHART_URL: "" # yes we want this blank because of how the ansible helm module expects OCI to be passed +INGRESS_NGINX_CHART_NAME: oci://{{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/ingress-nginx +INGRESS_NGINX_CONFIG: + controller: + image: + registry: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com + image: ingress-nginx/controller + digest: {{ CONTROLLER_ECR_IMAGE_DIGEST }} + 
admissionWebhooks: + patch: + image: + registry: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com + image: ingress-nginx/kube-webhook-certgen + digest: {{ WEBHOOK_ECR_IMAGE_DIGEST }} + service: + externalTrafficPolicy: Local + sessionAffinity: None + loadBalancerSourceRanges: "{{ LOADBALANCER_SOURCE_RANGES |default(['0.0.0.0/0'], -1) }}" + config: + use-forwarded-headers: "true" + hsts-max-age: "63072000" + tcp: {} + udp: {} + lifecycle: + preStop: + exec: + command: ["/bin/sh", "-c", "sleep 5; /usr/local/nginx/sbin/nginx -c /etc/nginx/nginx.conf -s quit; while pgrep -x nginx; do sleep 1; done"] + terminationGracePeriodSeconds: 600 + +# nfs client config +NFS_CLIENT_CHART_URL: "" # yes we want this blank because of how the ansible helm module expects OCI to be passed +NFS_CLIENT_CHART_NAME: oci://{{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/nfs-subdir-external-provisioner +NFS_CLIENT_CONFIG: + image: + repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/nfs-subdir-external-provisioner + nfs: + server: "{{ V4_CFG_RWX_FILESTORE_ENDPOINT }}" + path: "{{ V4_CFG_RWX_FILESTORE_PATH | replace('/$', '') }}/pvs" + mountOptions: + - noatime + - nodiratime + - 'rsize=262144' + - 'wsize=262144' + storageClass: + archiveOnDelete: "false" + name: sas + +# pg-storage class config +PG_NFS_CLIENT_CHART_URL: "" # yes we want this blank because of how the ansible helm module expects OCI to be passed +PG_NFS_CLIENT_CHART_NAME: oci://{{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/nfs-subdir-external-provisioner +PG_NFS_CLIENT_CONFIG: + image: + repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/nfs-subdir-external-provisioner + nfs: + server: "{{ V4_CFG_RWX_FILESTORE_ENDPOINT }}" + path: "{{ V4_CFG_RWX_FILESTORE_PATH | replace('/$', '') }}/pvs" + mountOptions: + - noatime + - nodiratime + - 'rsize=262144' + - 'wsize=262144' + storageClass: + archiveOnDelete: "false" + reclaimPolicy: "Retain" + name: pg-storage + +# 
auto-scaler +CLUSTER_AUTOSCALER_CHART_URL: "" # yes we want this blank because of how the ansible helm module expects OCI to be passed +CLUSTER_AUTOSCALER_CHART_NAME: oci://{{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/cluster-autoscaler +CLUSTER_AUTOSCALER_LOCATION: {{ AWS_REGION }} +CLUSTER_AUTOSCALER_CONFIG: + image: + repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/cluster-autoscaler + awsRegion: "{{ CLUSTER_AUTOSCALER_LOCATION }}" + autoDiscovery: + clusterName: "{{ CLUSTER_NAME }}" + rbac: + serviceAccount: + name: cluster-autoscaler + annotations: + "eks.amazonaws.com/role-arn": "{{ CLUSTER_AUTOSCALER_ACCOUNT }}" + "eks.amazonaws.com/sts-regional-endpoints": "true" + extraEnv: + AWS_STS_REGIONAL_ENDPOINTS: regional + extraArgs: + aws-use-static-instance-list: true # this keeps autoscaler from going to the internet for the ec2 list on init, auto-scaler will fail in darksite without this + +# EBS CSI DRIVER +EBS_CSI_DRIVER_CHART_URL: "" # yes we want this blank because of how the ansible helm module expects OCI to be passed +EBS_CSI_DRIVER_CHART_NAME: oci://{{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/aws-ebs-csi-driver +EBS_CSI_DRIVER_LOCATION: {{ AWS_REGION }} +EBS_CSI_DRIVER_CONFIG: + image: + repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/public.ecr.aws/ebs-csi-driver/aws-ebs-csi-driver + sidecars: + provisioner: + image: + repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/k8s.gcr.io/sig-storage/csi-provisioner + attacher: + image: + repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/k8s.gcr.io/sig-storage/csi-attacher + snapshotter: + image: + repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/k8s.gcr.io/sig-storage/csi-snapshotter + livenessProbe: + image: + repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/k8s.gcr.io/sig-storage/livenessprobe + resizer: + image: + repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION
}}.amazonaws.com/k8s.gcr.io/sig-storage/csi-resizer + nodeDriverRegistrar: + image: + repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/k8s.gcr.io/sig-storage/csi-node-driver-registrar + controller: + region: "{{ EBS_CSI_DRIVER_LOCATION }}" + serviceAccount: + create: true + name: ebs-csi-controller-sa + annotations: + "eks.amazonaws.com/role-arn": "{{ EBS_CSI_DRIVER_ACCOUNT }}" diff --git a/viya4-deployment-darksite/deployment-machine-assets/software/sitedefault.yaml b/viya4-deployment-darksite/deployment-machine-assets/software/sitedefault.yaml new file mode 100755 index 00000000..6a84adc2 --- /dev/null +++ b/viya4-deployment-darksite/deployment-machine-assets/software/sitedefault.yaml @@ -0,0 +1,26 @@ +cacerts: +config: + application: + sas.identities.providers.ldap.connection: + host: ldap-svc + password: Password123 + port: 389 + url: ldap://${sas.identities.providers.ldap.connection.host}:${sas.identities.providers.ldap.connection.port} + userDN: cn=admin,dc=example,dc=com + sas.identities.providers.ldap.group: + baseDN: ou=groups,dc=example,dc=com + accountId: cn + member: uniqueMember + memberOf: memberOf + objectClass: groupOfUniqueNames + objectFilter: (objectClass=groupOfUniqueNames) + searchFilter: cn={0} + sas.identities.providers.ldap.user: + baseDN: ou=people,dc=example,dc=com + accountId: uid + memberOf: memberOf + objectClass: inetOrgPerson + objectFilter: (objectClass=inetOrgPerson) + searchFilter: uid={0} + sas.logon.initial.password: Password123 +config/identities/sas.identities/administrator: viya_admin \ No newline at end of file diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/00_vars.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/00_vars.sh new file mode 100644 index 00000000..7cde5e20 --- /dev/null +++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/00_vars.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +AWS_ACCT_ID= +AWS_REGION= + +K8S_minor_version=24 # K8s v1.22.X minor would be 22 ... 
K8s v1.21.X minor version would be 21. This must match your deployment! \ No newline at end of file diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/README.md b/viya4-deployment-darksite/install-baseline-helm-from-ecr/README.md new file mode 100644 index 00000000..3c75be84 --- /dev/null +++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/README.md @@ -0,0 +1,37 @@ +# Gather some facts before running these scripts: + +## Global Variables (00_vars.sh) +1. AWS Account ID +2. AWS Region +3. K8s minor version + +## metrics-server +1. helm chart version + +## auto-scaler +1. helm chart version +2. Cluster name +3. Autoscaler ARN + +## ingress-nginx +1. helm chart version +2. controller image digest (sha256) - get this from your ECR +3. webhook image digest (sha256) - get this from your ECR +4. load balancer source ranges (must be a list, example: ["0.0.0.0/0"]) + +## nfs-subdir-external-provisioner +1. nfs-subdir-external-provisioner helm chart version +2. RWX filestore endpoint (IP or DNS for endpoint..) +3. RWX filestore path (don't include ../pvs as it is already appended in the script) + +## pg-nfs-provisioner +1. helm chart version +2. RWX filestore endpoint (IP or DNS for endpoint..) +3. RWX filestore path (don't include ../pvs as it is already appended in the script) + +## cert-manager +1. helm chart version (don't include the preceding v, it is already appended in the script) + +## ebs-csi-driver +1. helm chart version +2.
eks.amazonaws.com/role-arn for EBS_CSI_DRIVER \ No newline at end of file diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/auto_scaler_install.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/auto_scaler_install.sh new file mode 100644 index 00000000..99767e64 --- /dev/null +++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/auto_scaler_install.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +source 00_vars.sh + +# helm registry login +aws ecr get-login-password \ + --region $AWS_REGION | helm registry login \ + --username AWS \ + --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com + +read -p "cluster name: " cluster_name +read -p "autoscaler ARN: " autoscaler_arn +read -p "cluster-autoscaler helm chart version: " CHART_VERSION + +# output tmp.yaml +read -r -d '' TMP_YAML < tmp.yaml + +# helm install +echo -e "Installing auto-scaler...\n\n" +helm upgrade --cleanup-on-fail \ + --install cluster-autoscaler oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/cluster-autoscaler \ + --version=$CHART_VERSION \ + --values tmp.yaml + +# cleanup +unset TMP_YAML +rm tmp.yaml \ No newline at end of file diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/cert_manager_install.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/cert_manager_install.sh new file mode 100644 index 00000000..1b850f96 --- /dev/null +++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/cert_manager_install.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +source 00_vars.sh + +# helm registry login +aws ecr get-login-password \ + --region $AWS_REGION | helm registry login \ + --username AWS \ + --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com + +read -p 'cert-manager helm chart version (do not include the proceeding "v"): ' CHART_VERSION + +# output tmp.yaml +read -r -d '' TMP_YAML < tmp.yaml + +echo -e "Installing cert-manager...\n\n" + +helm upgrade --cleanup-on-fail \ + --install cert-manager 
oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/cert-manager \ + --version=v$CHART_VERSION \ + --values tmp.yaml \ + --namespace cert-manager \ + --create-namespace + +# cleanup +unset TMP_YAML +rm tmp.yaml \ No newline at end of file diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/ebs-csi-driver.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/ebs-csi-driver.sh new file mode 100644 index 00000000..3bff5d90 --- /dev/null +++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/ebs-csi-driver.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +## installs ebs-csi-driver via helm + +source 00_vars.sh + +# helm registry login +aws ecr get-login-password \ + --region $AWS_REGION | helm registry login \ + --username AWS \ + --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com + +read -p "What is the aws-ebs-csi-driver helm chart version? " CHART_VERSION +read -p "What is the eks.amazonaws.com/role-arn for EBS_CSI_DRIVER? " ARN + +read -r -d '' TMP_YAML < tmp.yaml + +# helm install +echo -e "\nInstalling aws-ebs-csi-driver...\n" +helm upgrade --cleanup-on-fail \ + --install aws-ebs-csi-driver oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/aws-ebs-csi-driver --version=$CHART_VERSION \ + --values tmp.yaml \ + --namespace kube-system + +# cleanup +unset TMP_YAML +rm tmp.yaml \ No newline at end of file diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/ingress_nginx_install.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/ingress_nginx_install.sh new file mode 100644 index 00000000..f31fdcda --- /dev/null +++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/ingress_nginx_install.sh @@ -0,0 +1,77 @@ +#!/bin/bash + +## installs this by default: +# - INGRESS_NGINX_CVE_2021_25742_PATCH +# - ingress-nginx private ingress + +source 00_vars.sh + +# helm registry login +aws ecr get-login-password \ + --region $AWS_REGION | helm registry login \ + --username AWS \ + --password-stdin
$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com + +read -p "ingress-nginx helm chart version: " CHART_VERSION +read -p "controller image digest (sha256): " CONTROLLER_DIGEST +read -p "webhook image digest (sha256): " WEBHOOK_DIGEST +read -p 'load balancer source ranges? must be a list (example): ["0.0.0.0/0"] ' LB + +# handle version differences with webhook path +CHART_VERSION_INT=$(echo "${CHART_VERSION//.}") +if [ $CHART_VERSION_INT -lt 411 ]; then + WEBHOOK_PATH=jettech +elif [ $CHART_VERSION_INT -ge 411 ]; then + WEBHOOK_PATH=ingress-nginx +else + echo "Error with your helm chart versions! Exiting..." + exit 1 +fi + +read -r -d '' TMP_YAML < tmp.yaml + +# helm install +echo -e "\nInstalling ingress-nginx...\n" +helm upgrade --cleanup-on-fail \ + --install ingress-nginx oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/ingress-nginx --version=$CHART_VERSION \ + --values tmp.yaml \ + --namespace ingress-nginx \ + --create-namespace + +# cleanup +unset TMP_YAML +rm tmp.yaml \ No newline at end of file diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/metrics_server_install.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/metrics_server_install.sh new file mode 100644 index 00000000..029290f9 --- /dev/null +++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/metrics_server_install.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +source 00_vars.sh + +# helm registry login +aws ecr get-login-password \ + --region $AWS_REGION | helm registry login \ + --username AWS \ + --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com + +read -p "metrics-server helm chart version: " CHART_VERSION + +echo -e "Installing metrics-server...\n\n" + +helm upgrade --cleanup-on-fail \ + --install metrics-server oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/metrics-server --version=$CHART_VERSION \ + --set image.registry=$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com \ + --set image.repository=metrics-server \ + --set 
apiService.create=true + +# cleanup +unset TMP_YAML +rm tmp.yaml \ No newline at end of file diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/nfs_provisioner_install.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/nfs_provisioner_install.sh new file mode 100644 index 00000000..d9c3bcf1 --- /dev/null +++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/nfs_provisioner_install.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +source 00_vars.sh + +# helm registry login +aws ecr get-login-password \ + --region $AWS_REGION | helm registry login \ + --username AWS \ + --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com + +read -p "nfs-subdir-external-provisioner helm chart version: " CHART_VERSION +read -p "RWX filestore endpoint: " ENDPOINT +read -p "RWX filestore path (don't include ../pvs): " ENDPOINT_PATH + +# output tmp.yaml +read -r -d '' TMP_YAML < tmp.yaml + +echo -e "Installing nfs-subdir-external-provisioner...\n\n" + +helm upgrade --cleanup-on-fail \ + --install nfs-subdir-external-provisioner oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/nfs-subdir-external-provisioner \ + --version=$CHART_VERSION \ + --values tmp.yaml \ + --namespace nfs-client \ + --create-namespace + +# cleanup +unset TMP_YAML +rm tmp.yaml \ No newline at end of file diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/pg_nfs_provisioner_install.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/pg_nfs_provisioner_install.sh new file mode 100644 index 00000000..0e704125 --- /dev/null +++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/pg_nfs_provisioner_install.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +source 00_vars.sh + +# helm registry login +aws ecr get-login-password \ + --region $AWS_REGION | helm registry login \ + --username AWS \ + --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com + +read -p "nfs-subdir-external-provisioner helm chart version: " CHART_VERSION +read -p "RWX filestore 
endpoint: " ENDPOINT +read -p "RWX filestore path (don't include ../pvs): " ENDPOINT_PATH + +# output tmp.yaml +read -r -d '' TMP_YAML < tmp.yaml + +echo -e "Installing nfs-subdir-external-provisioner...\n\n" + +helm upgrade --cleanup-on-fail \ + --install nfs-subdir-external-provisioner-pg-storage oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/nfs-subdir-external-provisioner \ + --version=$CHART_VERSION \ + --values tmp.yaml \ + --namespace nfs-client \ + --create-namespace + +# cleanup +unset TMP_YAML +rm tmp.yaml \ No newline at end of file diff --git a/viya4-deployment-darksite/mirrormgr-to-ecr/00_vars.sh b/viya4-deployment-darksite/mirrormgr-to-ecr/00_vars.sh new file mode 100644 index 00000000..57ade5eb --- /dev/null +++ b/viya4-deployment-darksite/mirrormgr-to-ecr/00_vars.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +NAMESPACE= # namespace used for your viya install +AWS_ACCT_ID= # your aws account ID +REGION= # your aws region +CERTS=~/viya/software/viya_order_assets/SASViyaV4_XXXX_certs.zip # path to the _certs.zip file +ASSETS=~/viya/software/viya_order_assets/SASViyaV4_XXX_XXXX-XXXX_deploymentAssets.tgz # path to the tgz assets file \ No newline at end of file diff --git a/viya4-deployment-darksite/mirrormgr-to-ecr/01_mirrormgr-ecr.sh b/viya4-deployment-darksite/mirrormgr-to-ecr/01_mirrormgr-ecr.sh new file mode 100644 index 00000000..b5fcf3ff --- /dev/null +++ b/viya4-deployment-darksite/mirrormgr-to-ecr/01_mirrormgr-ecr.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +## mirrormgr must be installed and in $PATH prior to running this script +## aws cli should be configured prior to running this script +## place your downloaded assets in the assets/ folder + +### source variables from 00_vars.sh +source 00_vars.sh + + +# create repositories? +echo +read -p "Do you need to create the ECR repositories? (y/n)? 
" -n 1 -r REPLY +echo # (optional) move to a new line +if [[ $REPLY =~ ^[Yy]$ ]]; then + # check if ECR repositories exist and create + for repo in $(mirrormgr list target docker repos --deployment-data $CERTS --destination $NAMESPACE) ; do + aws ecr create-repository --repository-name $repo --region $REGION + done +fi + + +# proceed with mirroring images? +echo +read -p "Proceed with mirroring images? this will take some time... (y/n)? " -n 1 -r REPLY +echo # (optional) move to a new line +if [[ $REPLY =~ ^[Yy]$ ]]; then + # populate the repositories.. this will take some time! + mirrormgr mirror registry -p ./sas_repos \ + --deployment-data $CERTS \ + --deployment-assets $ASSETS \ + --destination https://$AWS_ACCT_ID.dkr.ecr.$REGION.amazonaws.com/$NAMESPACE \ + --username 'AWS' \ + --password $(aws ecr get-login-password --region $REGION) +fi \ No newline at end of file diff --git a/viya4-deployment-darksite/mirrormgr-to-ecr/02_cleanup-ecr.sh b/viya4-deployment-darksite/mirrormgr-to-ecr/02_cleanup-ecr.sh new file mode 100644 index 00000000..f628fb57 --- /dev/null +++ b/viya4-deployment-darksite/mirrormgr-to-ecr/02_cleanup-ecr.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +# this script will help you quickly clean up viya related ECR repos + +### source variables from 00_vars.sh +source 00_vars.sh + +# get all the repos within the aws subscription +REPOS=$(aws ecr describe-repositories --region $REGION) + +# delete the SAS Viya repos +read -p "Are you sure you'd like to delete all SAS Viya repos and images (y/n)? " -n 1 -r REPLY +echo # (optional) move to a new line +if [[ $REPLY =~ ^[Yy]$ ]]; then + echo $REPOS | jq -r --arg keyword $NAMESPACE '.repositories[].repositoryName | select(. | contains($keyword))' | while read -r repo; do aws ecr delete-repository --repository-name $repo --force --no-cli-pager; done +fi + +# delete the 3rd party repos +read -p "Are you sure you'd like to delete all 3rd party SAS Viya related repos and images (y/n)? 
" -n 1 -r REPLY +echo # (optional) move to a new line +if [[ $REPLY =~ ^[Yy]$ ]]; then + echo $REPOS | jq -r --arg keyword cert-manager '.repositories[].repositoryName | select(. | contains($keyword))' | while read -r repo; do aws ecr delete-repository --repository-name $repo --force --no-cli-pager; done + echo $REPOS | jq -r --arg keyword cluster-autoscaler '.repositories[].repositoryName | select(. | contains($keyword))' | while read -r repo; do aws ecr delete-repository --repository-name $repo --force --no-cli-pager; done + echo $REPOS | jq -r --arg keyword ingress-nginx '.repositories[].repositoryName | select(. | contains($keyword))' | while read -r repo; do aws ecr delete-repository --repository-name $repo --force --no-cli-pager; done + echo $REPOS | jq -r --arg keyword nfs-subdir-external-provisioner '.repositories[].repositoryName | select(. | contains($keyword))' | while read -r repo; do aws ecr delete-repository --repository-name $repo --force --no-cli-pager; done + echo $REPOS | jq -r --arg keyword metrics-server '.repositories[].repositoryName | select(. | contains($keyword))' | while read -r repo; do aws ecr delete-repository --repository-name $repo --force --no-cli-pager; done + echo $REPOS | jq -r --arg keyword openldap '.repositories[].repositoryName | select(. | contains($keyword))' | while read -r repo; do aws ecr delete-repository --repository-name $repo --force --no-cli-pager; done +fi diff --git a/viya4-deployment-darksite/mirrormgr-to-ecr/README.md b/viya4-deployment-darksite/mirrormgr-to-ecr/README.md new file mode 100644 index 00000000..c53de24f --- /dev/null +++ b/viya4-deployment-darksite/mirrormgr-to-ecr/README.md @@ -0,0 +1,23 @@ +## Helper Script to help with mirrormgr + +SAS documentation specific to using mirrormgr for AWS ECR located [here](https://go.documentation.sas.com/doc/en/itopscdc/v_029/dplyml0phy0dkr/p0lexw9inr33ofn1tbo69twarhlx.htm). 
+ +## Step 1: Download Order Assets +- Download order assets [here](https://my.sas.com/en/my-orders.html). Check all under "order assets". + +## Step 2: Unzip to assets/ folder +- Unzip multipleAssets zip to assets/ folder ... if following the darksite-lab: place in /home/ec2-user/viya/software/viya_order_assets + +## Step 3: Install mirrormgr +- Download [here](https://support.sas.com/en/documentation/install-center/viya/deployment-tools/4/mirror-manager.html). + +## Step 4: Update variables in 00_vars.sh + +## Step 5: Run 01_mirrormgr-ecr.sh +- The script assumes your AWS CLI is already configured. +- This script will use `mirrormgr` to create AWS ECR repos for each viya4 image (AWS requirement). +- This script will download the viya4 images locally, then using `mirrormgr`, automatically push them to the appropriate ECR repo. + - This will take some time based on your local bandwidth. Note: the images are around ~120GiB total. + +## Helper script to help clean up ECR: 02_cleanup-ecr.sh +- This script uses AWS CLI to delete all the SAS Viya and 3rd party repositories and images. This makes life easier when you need to clean up the AWS ECR. From fe05598c545053d0490dc98edc1c61bf6743046f Mon Sep 17 00:00:00 2001 From: "David.Houck" Date: Mon, 8 Apr 2024 22:35:47 -0400 Subject: [PATCH 02/13] Fix shellcheck error & warnings --- .../02_dac_deploy.sh | 34 ++++++++++++------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/viya4-deployment-darksite/deployment-machine-assets/02_dac_deploy.sh b/viya4-deployment-darksite/deployment-machine-assets/02_dac_deploy.sh index 01fff311..f432163a 100755 --- a/viya4-deployment-darksite/deployment-machine-assets/02_dac_deploy.sh +++ b/viya4-deployment-darksite/deployment-machine-assets/02_dac_deploy.sh @@ -11,15 +11,15 @@ function docker_run() { echo "starting $tags job..."
docker run --rm \ --group-add root \ - --user $(id -u):$(id -g) \ - --volume $(pwd)/infrastructure/ssh/id_rsa:/config/jump_svr_private_key \ - --volume $(pwd)/infrastructure/terraform.tfstate:/config/tfstate \ + --user "$(id -u)":"$(id -g)" \ + --volume "$(pwd)"/infrastructure/ssh/id_rsa:/config/jump_svr_private_key \ + --volume "$(pwd)"/infrastructure/terraform.tfstate:/config/tfstate \ --volume /home/ec2-user/.kube/config:/.kube/config \ - --volume $(pwd)/software/deployments:/data \ - --volume $(pwd)/software/viya_order_assets:/viya_order_assets \ - --volume $(pwd)/software/ansible-vars-iac.yaml:/config/config \ - --volume $(pwd)/software/ingress:/ingress \ - --volume $(pwd)/software/sitedefault.yaml:/sitedefault/sitedefault.yaml \ + --volume "$(pwd)"/software/deployments:/data \ + --volume "$(pwd)"/software/viya_order_assets:/viya_order_assets \ + --volume "$(pwd)"/software/ansible-vars-iac.yaml:/config/config \ + --volume "$(pwd)"/software/ingress:/ingress \ + --volume "$(pwd)"/software/sitedefault.yaml:/sitedefault/sitedefault.yaml \ viya4-deployment:$DOCKER_TAG --tags "$tags" } @@ -64,7 +64,7 @@ done # check if provided tasks are valid for i in "${clean[@]}"; do - inarray=$(echo ${TASKS[@]} | grep -ow "$i" | wc -w) + inarray=$(echo "${TASKS[@]}" | grep -ow "$i" | wc -w) if [ $inarray == 0 ]; then echo $i "is not a valid input." exit 0 @@ -109,12 +109,20 @@ if [ "$last" == "uninstall" ]; then fi # all checks passed so build the tags string -tags=$(join_by , ${clean[*]}) +tags=$(join_by , "${clean[@]}") # run the function docker_run # remove downloaded assets -if [ -f software/deployments/darksite-lab-eks/viya/SASViyaV4*.tgz ]; then - rm software/deployments/darksite-lab-eks/viya/SASViyaV4*.tgz -fi +for f in software/deployments/darksite-lab-eks/viya/SASViyaV4*.tgz; do + ## Check if the glob gets expanded to existing files. + ## If not, f here will be exactly the pattern above + ## and the exists test will evaluate to false. 
+ [ -f "$f" ] && rm software/deployments/darksite-lab-eks/viya/SASViyaV4*.tgz || echo "SASViyaV4.tgz files do not exist" + + ## If one more files exist, they are all removed at once, so we can break after the first iteration + break +done + + From 1a581b5809b00e7cfebdc66cd83f57195d09f682 Mon Sep 17 00:00:00 2001 From: "David.Houck" Date: Tue, 9 Apr 2024 18:22:39 -0400 Subject: [PATCH 03/13] Fix missing end-of-file-newline, add .pre-commit cfg file to .gitignore --- .gitignore | 1 + .../deployment-machine-assets/software/sitedefault.yaml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index a8470946..5ace483b 100644 --- a/.gitignore +++ b/.gitignore @@ -5,6 +5,7 @@ .galaxy_install_info ## ignore ansible-vars.yml +.pre-commit-config.yaml ansible-vars.yml ansible-vars.yaml diff --git a/viya4-deployment-darksite/deployment-machine-assets/software/sitedefault.yaml b/viya4-deployment-darksite/deployment-machine-assets/software/sitedefault.yaml index 6a84adc2..9c717d0a 100755 --- a/viya4-deployment-darksite/deployment-machine-assets/software/sitedefault.yaml +++ b/viya4-deployment-darksite/deployment-machine-assets/software/sitedefault.yaml @@ -23,4 +23,4 @@ config: objectFilter: (objectClass=inetOrgPerson) searchFilter: uid={0} sas.logon.initial.password: Password123 -config/identities/sas.identities/administrator: viya_admin \ No newline at end of file +config/identities/sas.identities/administrator: viya_admin From d6b7963862e58fe2b5f1d87d081cfbc34f66b265 Mon Sep 17 00:00:00 2001 From: "David.Houck" Date: Tue, 9 Apr 2024 18:25:10 -0400 Subject: [PATCH 04/13] add dark site ansible-vars-iac.yaml file to .gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 5ace483b..69681736 100644 --- a/.gitignore +++ b/.gitignore @@ -8,6 +8,7 @@ .pre-commit-config.yaml ansible-vars.yml ansible-vars.yaml +ansible-vars-iac.yaml ## ignore data directory data/ From 
b7f1bacf941eca5621e63a24d22e5f532fe2a3fb Mon Sep 17 00:00:00 2001 From: "David.Houck" Date: Tue, 9 Apr 2024 18:36:23 -0400 Subject: [PATCH 05/13] revert .gitignore change, add ansible-vars-iac.yaml to .ansible-lint skip list --- .gitignore | 1 - linting-configs/.ansible-lint | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 69681736..5ace483b 100644 --- a/.gitignore +++ b/.gitignore @@ -8,7 +8,6 @@ .pre-commit-config.yaml ansible-vars.yml ansible-vars.yaml -ansible-vars-iac.yaml ## ignore data directory data/ diff --git a/linting-configs/.ansible-lint b/linting-configs/.ansible-lint index b922c78a..a8fddcbd 100644 --- a/linting-configs/.ansible-lint +++ b/linting-configs/.ansible-lint @@ -41,6 +41,7 @@ exclude_paths: - roles/istio - roles/vdm/tasks/deploy.yaml # TODO schema[tasks] error for a docker 'Deploy BLT - Deploy SAS Viya' task - .github/workflows # non ansible files + - viya4-deployment-darksite/deployment-machine-assets/software/ansible-vars-iac.yaml # dark site ansible-vars.yaml file template # Offline mode disables installation of requirements.yml and schema refreshing offline: false From af690a1ec94f2aa8d6ada2fa28462731f45279de Mon Sep 17 00:00:00 2001 From: "David.Houck" Date: Tue, 9 Apr 2024 19:57:43 -0400 Subject: [PATCH 06/13] add Acknowledgments --- viya4-deployment-darksite/README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/viya4-deployment-darksite/README.md b/viya4-deployment-darksite/README.md index 67e415c8..d7359007 100644 --- a/viya4-deployment-darksite/README.md +++ b/viya4-deployment-darksite/README.md @@ -1,5 +1,11 @@ # Deploy to AWS EKS in Dark Site or Air-Gapped Site scenario +### Acknowledgments + +The following individuals have contributed documentation, helper scripts and yaml templates that provided the basis for this document. +- Josh Coburn +- Matthias Ender + This file describes procedures, helper scripts, and example files. First decide on your deployment scenario: 1. 
The deployment virtual machine has Internet access but the EKS cluster cannot reach the Internet (dark site) - Follow procedures 1, 2, 4, and 6. From 9b2459a014c2cb068bb3817c0cbf8217259eaa9f Mon Sep 17 00:00:00 2001 From: "David.Houck" Date: Wed, 10 Apr 2024 15:53:14 -0400 Subject: [PATCH 07/13] Update github file links to branch files, these will be deleted after review prior to merging to staging --- .ansible-lint | 48 +++++++++++++++++++++++++---- viya4-deployment-darksite/README.md | 14 ++++----- 2 files changed, 49 insertions(+), 13 deletions(-) diff --git a/.ansible-lint b/.ansible-lint index 0c9cf06f..b922c78a 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -1,17 +1,53 @@ -var_naming_pattern: "^[a-zA-Z0-9_]*$" +--- +# .ansible-lint -parseable: true +profile: moderate +verbosity: 1 +strict: true +# Enforce variable names to follow pattern below, in addition to Ansible own +# requirements, like avoiding python identifiers. To disable add `var-naming` +# to skip_list. +var_naming_pattern: ^[a-zA-Z0-9_]*$ + +use_default_rules: true + +# Ansible-lint is able to recognize and load skip rules stored inside +# `.ansible-lint-ignore` (or `.config/ansible-lint-ignore.txt`) files. +# To skip a rule just enter filename and tag, like "playbook.yml package-latest" +# on a new line. +skip_list: + - role-name # DAC roles names contain dashes, can be ignored + - yaml[line-length] # it's easier to understand/debug the underlying command when it's not broken up + - name[template] # task name uses Jina template, this can be ignored + - var-naming + +# Ansible-lint does not automatically load rules that have the 'opt-in' tag. +# You must enable opt-in rules by listing each rule 'id' below. +enable_list: + - args + - empty-string-compare + - no-log-password + - no-same-owner + - yaml + +# exclude_paths included in this file are parsed relative to this file's location +# and not relative to the CWD of execution. 
CLI arguments passed to the --exclude +# option are parsed relative to the CWD of execution. exclude_paths: - .git/ - .gitignore - .cache/ - roles/istio + - roles/vdm/tasks/deploy.yaml # TODO schema[tasks] error for a docker 'Deploy BLT - Deploy SAS Viya' task + - .github/workflows # non ansible files -skip_list: - - unnamed-task - - role-name - - var-naming +# Offline mode disables installation of requirements.yml and schema refreshing +offline: false + +# Define required Ansible's variables to satisfy syntax check +extra_vars: + deployment_type: vsphere warn_list: - experimental diff --git a/viya4-deployment-darksite/README.md b/viya4-deployment-darksite/README.md index d7359007..b0570623 100644 --- a/viya4-deployment-darksite/README.md +++ b/viya4-deployment-darksite/README.md @@ -1,12 +1,12 @@ # Deploy to AWS EKS in Dark Site or Air-Gapped Site scenario -### Acknowledgments +### Contributors -The following individuals have contributed documentation, helper scripts and yaml templates that provided the basis for this document. +Thanks go to the following individuals who have contributed documentation, helper scripts and yaml templates that provided the basis for this document. - Josh Coburn - Matthias Ender -This file describes procedures, helper scripts, and example files. First decide on your deployment scenario: +This file describes procedures, helper scripts, and example files to assist with performing a dark site deployment using the `viya4-deployment` GitHub project. First decide on your deployment scenario: 1. The deployment virtual machine has Internet access but the EKS cluster cannot reach the Internet (dark site) - Follow procedures 1, 2, 4, and 6. 2. The deployment virtual machine and cluster has no Internet access (air-gapped site) - Follow procedures 1, 2, 5, and 6.
Note: you'll still need to somehow push all the images and Helm charts to ECR from a machine with Internet access, and the deployment machine will use the private ECR endpoint in the VPC to pull these during install, so the deployment virtual machine won't need Internet access. @@ -25,15 +25,15 @@ This file describes procedures, helper scripts, and example files. First decide 2. **Push 3rd party images to ECR:** - refer to the `baseline-to-ecr` folder in this repo for helper scripts - - note: OpenLDAP is only required if you are planning to use OpenLDAP for your deployment. Script to automate this is located [here](https://github.com/sassoftware/viya4-deployment/blob/main/viya4-deployment-darksite/baseline-to-ecr/openldap.sh). + - note: OpenLDAP is only required if you are planning to use OpenLDAP for your deployment. Script to automate this is located [here](https://github.com/sassoftware/viya4-deployment/blob/feat/iac-1117/viya4-deployment-darksite/baseline-to-ecr/openldap.sh) [here](https://github.com/sassoftware/viya4-deployment/blob/main/viya4-deployment-darksite/baseline-to-ecr/openldap.sh). 3. **(Optional) If OpenLDAP is needed, modfy local viya4-deployment clone** - - Refer to the [darksite-openldap-mod](https://github.com/sassoftware/viya4-deployment/blob/main/viya4-aws-darksite/darksite-openldap-mod) folder for procedures. You can build the container using the script or do it manually. + - Refer to the [darksite-openldap-mod](https://github.com/sassoftware/viya4-deployment/blob/feat/iac-1117/viya4-aws-darksite/darksite-openldap-mod) [darksite-openldap-mod](https://github.com/sassoftware/viya4-deployment/blob/main/viya4-aws-darksite/darksite-openldap-mod) folder for procedures. You can build the container using the script or do it manually. 4. **Deployment machine has Internet access - use viya4-deployment for baseline,install** 1. 
Use built in variables for baseline configurations in your `ansible-vars.yaml` file: - - Example `ansible-vars.yaml` provided [here](https://github.com/sassoftware/viya4-deployment/blob/main/viya4-aws-darksite/deployment-machine-assets/software/ansible-vars-iac.yaml) + - Example `ansible-vars.yaml` provided [here](https://github.com/sassoftware/viya4-deployment/blob/feat/iac-1117/viya4-deployment-darksite/deployment-machine-assets/software/ansible-vars-iac.yaml) [here](https://github.com/sassoftware/viya4-deployment/blob/main/viya4-deployment-darksite/deployment-machine-assets/software/ansible-vars-iac.yaml) - The goal here is to change the image references to point to ECR versus an Internet facing repo and add cluster subnet ID annotations for the nginx load balancers: - Replace `{{ AWS_ACCT_ID }}` with your AWS account ID - Replace `{{ AWS_REGION }}` with your AWS region @@ -45,7 +45,7 @@ This file describes procedures, helper scripts, and example files. First decide 5. **Deployment machine has no Internet access - install baseline using Helm charts pulled from ECR** - Two Options: - 1. If using OCI type repo (like ECR), we can use `viya4-deployment` but we'll need to make some changes to the baseline items in `ansible-vars.yaml`. An example provided [here](https://github.com/sassoftware/viya4-deployment/blob/main/viya4-aws-darksite/deployment-machine-assets/software/ansible-vars-iac.yaml) includes the needed variables for OCI Helm support. Pay close attention to `XXX_CHART_URL` and `XXX_CHART_NAME` variables. + 1. If using OCI type repo (like ECR), we can use `viya4-deployment` but we'll need to make some changes to the baseline items in `ansible-vars.yaml`. 
An example provided [here](https://github.com/sassoftware/viya4-deployment/blob/feat/iac-1117/viya4-deployment-darksite/deployment-machine-assets/software/ansible-vars-iac.yaml) [here](https://github.com/sassoftware/viya4-deployment/blob/main/viya4-deployment-darksite/deployment-machine-assets/software/ansible-vars-iac.yaml) includes the needed variables for OCI Helm support. Pay close attention to `XXX_CHART_URL` and `XXX_CHART_NAME` variables. 2. Use Helm directly to "manually" install baseline items. - Refer to baseline-helm-install-ecr README.md for instructions. From 772daba54b15c9d7b6f2faa358f50bb4a3ed8c6a Mon Sep 17 00:00:00 2001 From: "David.Houck" Date: Wed, 10 Apr 2024 16:03:45 -0400 Subject: [PATCH 08/13] Update "uses: actions/checkout@v3" to v4 --- .github/workflows/linter-analysis.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/linter-analysis.yaml b/.github/workflows/linter-analysis.yaml index 742e3f4b..0f78ffeb 100644 --- a/.github/workflows/linter-analysis.yaml +++ b/.github/workflows/linter-analysis.yaml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Repo - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Run Hadolint Action uses: jbergstroem/hadolint-gh-action@v1.11.0 @@ -25,7 +25,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Repo - uses: actions/checkout@v3 + uses: actions/checkout@v4 # .shellcheckrc is read from the current dir - name: Copy Config to Parent Level Directory @@ -42,7 +42,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Repo - uses: actions/checkout@v3 + uses: actions/checkout@v4 # The latest ansible/ansible-lint-action removed the # ability to specify configs from other dirs From ed187092cf6e591015778b1035af0129cc562b64 Mon Sep 17 00:00:00 2001 From: "David.Houck" Date: Fri, 3 May 2024 17:13:19 -0400 Subject: [PATCH 09/13] Edits for clarity --- viya4-deployment-darksite/README.md | 16 +++++++++++----- 1 file changed, 11 
insertions(+), 5 deletions(-) diff --git a/viya4-deployment-darksite/README.md b/viya4-deployment-darksite/README.md index b0570623..81fda25c 100644 --- a/viya4-deployment-darksite/README.md +++ b/viya4-deployment-darksite/README.md @@ -2,17 +2,23 @@ ### Contributors -Thanks go to the following individuals who have contributed documentation, helper scripts and yaml templates that provided the basis for this document. +We thank the following individuals for technical assistance and their contributions of documentation, scripts and yaml templates that provided the basis for this document. - Josh Coburn - Matthias Ender -This file describes procedures, helper scripts, and example files to assist with performing a dark site deployment using the `viya4-deployment` GitHub project. First decide on your deployment scenario: +### Background -1. The deployment virtual machine has Internet access but the EKS cluster cannot reach the Internet (dark site) - Follow procedures 1, 2, 4, and 6. +This file describes procedures, helper scripts, and example files to assist with performing a Dark Site deployment using the `viya4-deployment` GitHub project. + +### Dark Site Deployment Scenarios + +Choose the deployment scenario that describes your Dark Site configuration: + +1. The deployment virtual machine has Internet access but the EKS cluster cannot reach the Internet (Dark Site) - Follow procedures 1, 2, 4, and 6. 2. The deployment virtual machine and cluster has no Internet access (air-gapped site) - Follow procedures 1, 2, 5, and 6. Note: you'll still need to somehow push all the images and Helm charts to ECR from a machine with Internet access, and the deployment machine will use the private ECR endpoint in the VPC to pull these during install, so the deployment virtual machine won't need Internet access. **Notes:** -- The following procedures assume that the `viya4-iac-aws` project was used to deploy the EKS infrastructure.
Refer to the `viya4-iac-aws-darksite` folder within the `viya4-iac-aws` [github repo](https://github.com/sassoftware/viya4-iac-aws) for the procedures to follow pertaining to IaC use with an AWS dark site configuration. +- The following procedures assume that the `viya4-iac-aws` project was used to deploy the EKS infrastructure. Refer to the `viya4-iac-aws-darksite` folder within the `viya4-iac-aws` [github repo](https://github.com/sassoftware/viya4-iac-aws) for the procedures to follow pertaining to IaC use with an AWS Dark Site configuration. - Helper shell scripts under the `viya4-deployment-darksite` folder in this project assume that the deployment virtual machine is properly configured, confirm that: - kubeconfig file for the EKS cluster has been installed and tested (EKS cluster admin access is verified as working) - AWS CLI is configured @@ -50,7 +56,7 @@ This file describes procedures, helper scripts, and example files to assist with - Refer to baseline-helm-install-ecr README.md for instructions. 6. **viya4-deployment viya,install** - - **Note:** As of `viya4-deployment` v6.0.0, the project uses the Deployment Operator as the default. The deployment operator has additional considerations in a dark site deployment because the repository warehouse for the metadata will not be available without Internet access (as it is pulled from ses.sas.com). + - **Note:** As of `viya4-deployment` v6.0.0, the project uses the Deployment Operator as the default. The deployment operator has additional considerations in a Dark Site deployment because the repository warehouse for the metadata will not be available without Internet access (as it is pulled from ses.sas.com). 
- There are multiple options to mitigate the issue created by using the Deployment operator: From e3dbc344d7910e1525545970af1fa722eb7efe62 Mon Sep 17 00:00:00 2001 From: "David.Houck" Date: Thu, 9 May 2024 10:41:51 -0400 Subject: [PATCH 10/13] Experimental feature README.md will not be made available in GitHub yet --- viya4-deployment-darksite/README.md | 72 ----------------------------- 1 file changed, 72 deletions(-) delete mode 100644 viya4-deployment-darksite/README.md diff --git a/viya4-deployment-darksite/README.md b/viya4-deployment-darksite/README.md deleted file mode 100644 index 81fda25c..00000000 --- a/viya4-deployment-darksite/README.md +++ /dev/null @@ -1,72 +0,0 @@ -# Deploy to AWS EKS in Dark Site or Air-Gapped Site scenario - -### Contributors - -We thank the following individuals for technical assistance and their contributions of documentation, scripts and yaml templates that provided the basis for this document. -- Josh Coburn -- Matthias Ender - -### Background - -This file describes procedures, helper scripts, and example files to assist with performing a Dark Site deployment using the `viya4-deployment` GitHub project. - -### Dark Site Deployment Scenarios - -Choose the deployment scenario that describes your Dark Site configuration: - -1. The deployment virtual machine has Internet access but the EKS cluster cannot reach the Internet (Dark Site) - Follow procedures 1, 2, 4, and 6. -2. The deployment virtual machine and cluster has no Internet access (air-gapped site) - Follow procedures 1, 2, 5, and 6. Note: you'll still need to somehow push all the images and Helm charts to ECR from a machine with Internet access, and the deployment machine will use the private ECR endpoint in the VPC to pull these during install, so the deployment virtual machine won't need Internet access. - -**Notes:** -- The following procedures assume that the `viya4-iac-aws` project was used to deploy the EKS infrastructure.
Refer to the `viya4-iac-aws-darksite` folder within the `viya4-iac-aws` [github repo](https://github.com/sassoftware/viya4-iac-aws) for the procedures to follow pertaining to IaC use with an AWS Dark Site configuration. -- Helper shell scripts under the `viya4-deployment-darksite` folder in this project assume that the deployment virtual machine is properly configured, confirm that: - - kubeconfig file for the EKS cluster has been installed and tested (EKS cluster admin access is verified as working) - - AWS CLI is configured - -# Procedures - -1. **Push Viya4 images to ECR (uses SAS mirrormgr tool):** - - Download deployment assets from my.sas.com - - refer to the `mirrormgr-to-ecr` folder in this repo for helper scripts - -2. **Push 3rd party images to ECR:** - - refer to the `baseline-to-ecr` folder in this repo for helper scripts - - note: OpenLDAP is only required if you are planning to use OpenLDAP for your deployment. Script to automate this is located [here](https://github.com/sassoftware/viya4-deployment/blob/feat/iac-1117/viya4-deployment-darksite/baseline-to-ecr/openldap.sh) [here](https://github.com/sassoftware/viya4-deployment/blob/main/viya4-deployment-darksite/baseline-to-ecr/openldap.sh). - -3. **(Optional) If OpenLDAP is needed, modfy local viya4-deployment clone** - - Refer to the [darksite-openldap-mod](https://github.com/sassoftware/viya4-deployment/blob/feat/iac-1117/viya4-aws-darksite/darksite-openldap-mod) [darksite-openldap-mod](https://github.com/sassoftware/viya4-deployment/blob/main/viya4-aws-darksite/darksite-openldap-mod) folder for procedures. You can build the container using the script or do it manually. - -4. **Deployment machine has Internet access - use viya4-deployment for baseline,install** - - 1. 
Use built in variables for baseline configurations in your `ansible-vars.yaml` file: - - Example `ansible-vars.yaml` provided [here](https://github.com/sassoftware/viya4-deployment/blob/feat/iac-1117/viya4-deployment-darksite/deployment-machine-assets/software/ansible-vars-iac.yaml) [here](https://github.com/sassoftware/viya4-deployment/blob/main/viya4-deployment-darksite/deployment-machine-assets/software/ansible-vars-iac.yaml) - - The goal here is to change the image references to point to ECR versus an Internet facing repo and add cluster subnet ID annotations for the nginx load balancers: - - Replace `{{ AWS_ACCT_ID }}` with your AWS account ID - - Replace `{{ AWS_REGION }}` with your AWS region - - Replace `{{ CONTROLLER_ECR_IMAGE_DIGEST }}` with image digest from ECR - - Replace `{{ WEBHOOK_ECR_IMAGE_DIGEST }}` with image digest from ECR - - If your VPC contains multiple subnets (unrelated to viya), you may need to add annotations to force the NLB to associate with the Viya subnets. More on that topic [here](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/deploy/subnet_discovery/). - - 2. Deploy viya4-deployment baseline,install. Note: the deployment virtual machine will pull the Helm charts from the Internet during this step. - -5. **Deployment machine has no Internet access - install baseline using Helm charts pulled from ECR** - - Two Options: - 1. If using OCI type repo (like ECR), we can use `viya4-deployment` but we'll need to make some changes to the baseline items in `ansible-vars.yaml`. An example provided [here](https://github.com/sassoftware/viya4-deployment/blob/feat/iac-1117/viya4-deployment-darksite/deployment-machine-assets/software/ansible-vars-iac.yaml) [here](https://github.com/sassoftware/viya4-deployment/blob/main/viya4-deployment-darksite/deployment-machine-assets/software/ansible-vars-iac.yaml) includes the needed variables for OCI Helm support. Pay close attention to `XXX_CHART_URL` and `XXX_CHART_NAME` variables. 
- 2. Use Helm directly to "manually" install baseline items. - - Refer to baseline-helm-install-ecr README.md for instructions. - -6. **viya4-deployment viya,install** - - **Note:** As of `viya4-deployment` v6.0.0, the project uses the Deployment Operator as the default. The deployment operator has additional considerations in a Dark Site deployment because the repository warehouse for the metadata will not be available without Internet access (as it is pulled from ses.sas.com). - - - There are multiple options to mitigate the issue created by using the Deployment operator: - - 1. (Easiest/Recommended) Set `V4_DEPLOYMENT_OPERATOR_ENABLED` to false. This uses the sas-orchestration method for deployment instead of the Deployment Operator (no requirement for offline repository-warehouse hosting is required). - - 2. Supply the repository information through an internally deployed http server. SAS doesn't provide instructions on how to do this, because there are a lot of ways to accomplish this. One way to accomplish this is shared in this [TS Track](https://sirius.na.sas.com/Sirius/GSTS/ShowTrack.aspx?trknum=7613552746). - - 3. Store required metadata on a file system that can be mounted to the reconciler pod (using a transformer). [TIES Blog for instructions](http://sww.sas.com/blogs/wp/technical-insights/8466/configuring-a-repository-warehouse-for-a-sas-viya-platform-deployment-at-a-dark-site/sukhda/2023/02/28) - - 4. Use DAC with `DEPLOY: false` set. This will build the manifests and references in kustomization.yaml and stop there. Then you can proceed with manual installation steps: create site.yaml and apply it to the cluster (just ensure you are using the proper kustomization version!) - - - **Important:** ensure you specify `V4_CFG_CR_URL` in your ansible-vars. This should be your ECR URL + your viya namespace! - example: I used "viya4" as my Viya namespace.... 
`XXXXX.dkr.ecr.{{AWS_REGION}}.amazonaws.com/viya4` From d2a893b2e35b8ab473e9e6319a2780f29df279c8 Mon Sep 17 00:00:00 2001 From: "David.Houck" Date: Thu, 9 May 2024 10:42:44 -0400 Subject: [PATCH 11/13] Fix line ending issues --- viya4-deployment-darksite/baseline-to-ecr/00_vars.sh | 2 +- viya4-deployment-darksite/baseline-to-ecr/01_run_all.sh | 2 +- viya4-deployment-darksite/baseline-to-ecr/auto_scaler.sh | 6 +++--- viya4-deployment-darksite/baseline-to-ecr/cert_manager.sh | 2 +- viya4-deployment-darksite/baseline-to-ecr/ingress_nginx.sh | 2 +- viya4-deployment-darksite/baseline-to-ecr/metrics_server.sh | 2 +- .../baseline-to-ecr/nfs_subdir_external_provisioner.sh | 2 +- viya4-deployment-darksite/baseline-to-ecr/openldap.sh | 2 +- 8 files changed, 10 insertions(+), 10 deletions(-) diff --git a/viya4-deployment-darksite/baseline-to-ecr/00_vars.sh b/viya4-deployment-darksite/baseline-to-ecr/00_vars.sh index 6e4fe534..330e48e0 100644 --- a/viya4-deployment-darksite/baseline-to-ecr/00_vars.sh +++ b/viya4-deployment-darksite/baseline-to-ecr/00_vars.sh @@ -7,4 +7,4 @@ AWS_REGION= K8S_minor_version=25 # K8s v1.22.X minor would be 22 ... K8s v1.21.X minor version would be 21. This must match your deployment! DEPLOYMENT_VERSION=main # main will pull latest release of viya4-deployment. But this can be set to a specific version if needed, example: 5.2.0 -DOCKER_SUDO= # put sudo here, if you require sudo docker commands... else leave blank \ No newline at end of file +DOCKER_SUDO= # put sudo here, if you require sudo docker commands... else leave blank diff --git a/viya4-deployment-darksite/baseline-to-ecr/01_run_all.sh b/viya4-deployment-darksite/baseline-to-ecr/01_run_all.sh index 62a9c736..e3213af9 100644 --- a/viya4-deployment-darksite/baseline-to-ecr/01_run_all.sh +++ b/viya4-deployment-darksite/baseline-to-ecr/01_run_all.sh @@ -8,4 +8,4 @@ source 00_vars.sh . metrics_server.sh . nfs_subdir_external_provisioner.sh . openldap.sh -. 
ebs_driver.sh \ No newline at end of file +. ebs_driver.sh diff --git a/viya4-deployment-darksite/baseline-to-ecr/auto_scaler.sh b/viya4-deployment-darksite/baseline-to-ecr/auto_scaler.sh index 272df9c7..719531fd 100644 --- a/viya4-deployment-darksite/baseline-to-ecr/auto_scaler.sh +++ b/viya4-deployment-darksite/baseline-to-ecr/auto_scaler.sh @@ -4,9 +4,9 @@ source 00_vars.sh # account for v6.3.0+ changes - autoscaler now supports k8s 1.25 DV=$(echo $DEPLOYMENT_VERSION | sed 's/\.//g') -if [ $DEPLOYMENT_VERSION == "main" ] && [ $K8S_minor_version -ge 25 ]; then +if [ $DEPLOYMENT_VERSION == "main" ] && [ $K8S_minor_version -ge 25 ]; then CHART_VERSION=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml | yq '.autoscalerVersions.PDBv1Support.api.chartVersion') -elif [ $DEPLOYMENT_VERSION == "main" ] && [ $K8S_minor_version -le 24 ]; then +elif [ $DEPLOYMENT_VERSION == "main" ] && [ $K8S_minor_version -le 24 ]; then CHART_VERSION=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml | yq '.autoscalerVersions.PDBv1beta1Support.api.chartVersion') elif [ $DV -ge 630 ] && [ $K8S_minor_version -ge 25 ]; then CHART_VERSION=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml | yq '.autoscalerVersions.PDBv1Support.api.chartVersion') @@ -51,4 +51,4 @@ $DOCKER_SUDO docker tag $IMG_REPO:$TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaw aws ecr get-login-password --region $AWS_REGION | $DOCKER_SUDO docker login --username AWS --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com # # ## puch local image to ecr -$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/cluster-autoscaler:$TAG \ No newline at end of file +$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/cluster-autoscaler:$TAG diff --git 
a/viya4-deployment-darksite/baseline-to-ecr/cert_manager.sh b/viya4-deployment-darksite/baseline-to-ecr/cert_manager.sh index 57f34626..6ca14381 100644 --- a/viya4-deployment-darksite/baseline-to-ecr/cert_manager.sh +++ b/viya4-deployment-darksite/baseline-to-ecr/cert_manager.sh @@ -54,4 +54,4 @@ aws ecr get-login-password --region $AWS_REGION | $DOCKER_SUDO docker login --u $DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_CONTROLLER:v$CHART_VERSION $DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_WEBHOOK:v$CHART_VERSION $DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_CAINJECTOR:v$CHART_VERSION -$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_STARTUP:v$CHART_VERSION \ No newline at end of file +$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_STARTUP:v$CHART_VERSION diff --git a/viya4-deployment-darksite/baseline-to-ecr/ingress_nginx.sh b/viya4-deployment-darksite/baseline-to-ecr/ingress_nginx.sh index 4e578c17..08d2a5b6 100644 --- a/viya4-deployment-darksite/baseline-to-ecr/ingress_nginx.sh +++ b/viya4-deployment-darksite/baseline-to-ecr/ingress_nginx.sh @@ -59,4 +59,4 @@ aws ecr get-login-password --region $AWS_REGION | $DOCKER_SUDO docker login --u # # ## puch local image to ecr $DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$CONTROLLER_IMAGE:$CONTROLLER_TAG -$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$WEBHOOKS_IMAGE:$WEBHOOKS_TAG \ No newline at end of file +$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$WEBHOOKS_IMAGE:$WEBHOOKS_TAG diff --git a/viya4-deployment-darksite/baseline-to-ecr/metrics_server.sh b/viya4-deployment-darksite/baseline-to-ecr/metrics_server.sh index abeb0262..c1e12ffb 100644 --- a/viya4-deployment-darksite/baseline-to-ecr/metrics_server.sh +++ b/viya4-deployment-darksite/baseline-to-ecr/metrics_server.sh @@ -38,4 +38,4 @@ 
$DOCKER_SUDO docker tag $REGISTRY/$IMAGE:$TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.a aws ecr get-login-password --region $AWS_REGION | $DOCKER_SUDO docker login --username AWS --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com # # ## puch local image to ecr -$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/metrics-server:$TAG \ No newline at end of file +$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/metrics-server:$TAG diff --git a/viya4-deployment-darksite/baseline-to-ecr/nfs_subdir_external_provisioner.sh b/viya4-deployment-darksite/baseline-to-ecr/nfs_subdir_external_provisioner.sh index 7d3408f8..35f17e10 100644 --- a/viya4-deployment-darksite/baseline-to-ecr/nfs_subdir_external_provisioner.sh +++ b/viya4-deployment-darksite/baseline-to-ecr/nfs_subdir_external_provisioner.sh @@ -37,4 +37,4 @@ $DOCKER_SUDO docker tag $REPOSITORY:$TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazon aws ecr get-login-password --region $AWS_REGION | $DOCKER_SUDO docker login --username AWS --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com # # ## puch local image to ecr -$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/nfs-subdir-external-provisioner:$TAG \ No newline at end of file +$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/nfs-subdir-external-provisioner:$TAG diff --git a/viya4-deployment-darksite/baseline-to-ecr/openldap.sh b/viya4-deployment-darksite/baseline-to-ecr/openldap.sh index 88a86c2a..bf96f5cb 100644 --- a/viya4-deployment-darksite/baseline-to-ecr/openldap.sh +++ b/viya4-deployment-darksite/baseline-to-ecr/openldap.sh @@ -23,4 +23,4 @@ $DOCKER_SUDO docker tag $IMAGE $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$I aws ecr get-login-password --region $AWS_REGION | docker login --username AWS --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com # # ## puch local image to ecr -$DOCKER_SUDO docker push 
$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMAGE \ No newline at end of file +$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMAGE From 0390ea1e07fdd7fcf21e41f8821752be2022ed97 Mon Sep 17 00:00:00 2001 From: "David.Houck" Date: Thu, 9 May 2024 11:46:31 -0400 Subject: [PATCH 12/13] Remove extra lines --- viya4-deployment-darksite/baseline-to-ecr/cert_manager.sh | 3 --- .../baseline-to-ecr/nfs_subdir_external_provisioner.sh | 3 --- 2 files changed, 6 deletions(-) diff --git a/viya4-deployment-darksite/baseline-to-ecr/cert_manager.sh b/viya4-deployment-darksite/baseline-to-ecr/cert_manager.sh index 6ca14381..2cf12421 100644 --- a/viya4-deployment-darksite/baseline-to-ecr/cert_manager.sh +++ b/viya4-deployment-darksite/baseline-to-ecr/cert_manager.sh @@ -2,7 +2,6 @@ source 00_vars.sh - ## get chart version from viya4-deployment repo echo "**** cert-manager ****" CHART_VERSION=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml | yq '.CERT_MANAGER_CHART_VERSION') @@ -17,14 +16,12 @@ IMG_STARTUP=$(helm show values jetstack/cert-manager --version=$CHART_VERSION | echo "controller repo: $IMG_CONTROLLER" && echo "webhook repo: $IMG_WEBHOOK" && echo "cainject repo: $IMG_CAINJECTOR" && echo "startupapicheck repo: $IMG_STARTUP" echo "*********************" - ## pull the images $DOCKER_SUDO docker pull $IMG_CONTROLLER:v$CHART_VERSION $DOCKER_SUDO docker pull $IMG_WEBHOOK:v$CHART_VERSION $DOCKER_SUDO docker pull $IMG_CAINJECTOR:v$CHART_VERSION $DOCKER_SUDO docker pull $IMG_STARTUP:v$CHART_VERSION - # create ECR repos aws ecr create-repository --no-cli-pager --repository-name cert-manager # this repo is used to store the helm chart aws ecr create-repository --no-cli-pager --repository-name $IMG_CONTROLLER diff --git a/viya4-deployment-darksite/baseline-to-ecr/nfs_subdir_external_provisioner.sh b/viya4-deployment-darksite/baseline-to-ecr/nfs_subdir_external_provisioner.sh index 
35f17e10..7c837ca9 100644 --- a/viya4-deployment-darksite/baseline-to-ecr/nfs_subdir_external_provisioner.sh +++ b/viya4-deployment-darksite/baseline-to-ecr/nfs_subdir_external_provisioner.sh @@ -15,7 +15,6 @@ echo "*****************************************" ## pull the image $DOCKER_SUDO docker pull $REPOSITORY:$TAG - # create ECR repo aws ecr create-repository --no-cli-pager --repository-name nfs-subdir-external-provisioner @@ -28,11 +27,9 @@ aws ecr get-login-password \ helm push nfs-subdir-external-provisioner-$CHART_VERSION.tgz oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/ rm nfs-subdir-external-provisioner-$CHART_VERSION.tgz - # ## update local image tag appropriately $DOCKER_SUDO docker tag $REPOSITORY:$TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/nfs-subdir-external-provisioner:$TAG - # # ## auth local $DOCKER_SUDO docker to ecr aws ecr get-login-password --region $AWS_REGION | $DOCKER_SUDO docker login --username AWS --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com From 0fc289d5c0a83c5e18080f1810b44b5fca8fc451 Mon Sep 17 00:00:00 2001 From: "David.Houck" Date: Tue, 14 May 2024 17:56:15 -0400 Subject: [PATCH 13/13] Add Copyright to .sh files --- viya4-deployment-darksite/baseline-to-ecr/00_vars.sh | 3 +++ viya4-deployment-darksite/baseline-to-ecr/01_run_all.sh | 3 +++ viya4-deployment-darksite/baseline-to-ecr/auto_scaler.sh | 3 +++ viya4-deployment-darksite/baseline-to-ecr/cert_manager.sh | 3 +++ viya4-deployment-darksite/baseline-to-ecr/ebs_driver.sh | 3 +++ viya4-deployment-darksite/baseline-to-ecr/ingress_nginx.sh | 3 +++ viya4-deployment-darksite/baseline-to-ecr/metrics_server.sh | 3 +++ .../baseline-to-ecr/nfs_subdir_external_provisioner.sh | 3 +++ viya4-deployment-darksite/baseline-to-ecr/openldap.sh | 3 +++ .../darksite-openldap-mod/darksite-openldap-mod.sh | 5 ++++- .../deployment-machine-assets/01_iac_deploy.sh | 5 ++++- .../deployment-machine-assets/02_dac_deploy.sh | 3 +++ .../install-baseline-helm-from-ecr/00_vars.sh 
| 5 ++++- .../install-baseline-helm-from-ecr/auto_scaler_install.sh | 5 ++++- .../install-baseline-helm-from-ecr/cert_manager_install.sh | 5 ++++- .../install-baseline-helm-from-ecr/ebs-csi-driver.sh | 5 ++++- .../install-baseline-helm-from-ecr/ingress_nginx_install.sh | 5 ++++- .../install-baseline-helm-from-ecr/metrics_server_install.sh | 5 ++++- .../nfs_provisioner_install.sh | 5 ++++- .../pg_nfs_provisioner_install.sh | 5 ++++- viya4-deployment-darksite/mirrormgr-to-ecr/00_vars.sh | 5 ++++- .../mirrormgr-to-ecr/01_mirrormgr-ecr.sh | 5 ++++- viya4-deployment-darksite/mirrormgr-to-ecr/02_cleanup-ecr.sh | 3 +++ 23 files changed, 81 insertions(+), 12 deletions(-) diff --git a/viya4-deployment-darksite/baseline-to-ecr/00_vars.sh b/viya4-deployment-darksite/baseline-to-ecr/00_vars.sh index 330e48e0..9493e01e 100644 --- a/viya4-deployment-darksite/baseline-to-ecr/00_vars.sh +++ b/viya4-deployment-darksite/baseline-to-ecr/00_vars.sh @@ -1,5 +1,8 @@ #!/bin/bash +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + ## set variables AWS_ACCT_ID= AWS_REGION= diff --git a/viya4-deployment-darksite/baseline-to-ecr/01_run_all.sh b/viya4-deployment-darksite/baseline-to-ecr/01_run_all.sh index e3213af9..d4458ad2 100644 --- a/viya4-deployment-darksite/baseline-to-ecr/01_run_all.sh +++ b/viya4-deployment-darksite/baseline-to-ecr/01_run_all.sh @@ -1,5 +1,8 @@ #!/bin/bash +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + source 00_vars.sh . auto_scaler.sh diff --git a/viya4-deployment-darksite/baseline-to-ecr/auto_scaler.sh b/viya4-deployment-darksite/baseline-to-ecr/auto_scaler.sh index 719531fd..2c33d497 100644 --- a/viya4-deployment-darksite/baseline-to-ecr/auto_scaler.sh +++ b/viya4-deployment-darksite/baseline-to-ecr/auto_scaler.sh @@ -1,5 +1,8 @@ #!/bin/bash +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. 
All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + source 00_vars.sh # account for v6.3.0+ changes - autoscaler now supports k8s 1.25 diff --git a/viya4-deployment-darksite/baseline-to-ecr/cert_manager.sh b/viya4-deployment-darksite/baseline-to-ecr/cert_manager.sh index 2cf12421..cead0bce 100644 --- a/viya4-deployment-darksite/baseline-to-ecr/cert_manager.sh +++ b/viya4-deployment-darksite/baseline-to-ecr/cert_manager.sh @@ -1,5 +1,8 @@ #!/bin/bash +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + source 00_vars.sh ## get chart version from viya4-deployment repo diff --git a/viya4-deployment-darksite/baseline-to-ecr/ebs_driver.sh b/viya4-deployment-darksite/baseline-to-ecr/ebs_driver.sh index 58223a74..92af7851 100644 --- a/viya4-deployment-darksite/baseline-to-ecr/ebs_driver.sh +++ b/viya4-deployment-darksite/baseline-to-ecr/ebs_driver.sh @@ -1,5 +1,8 @@ #!/bin/bash +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + source 00_vars.sh ## get chart version from viya4-deployment repo diff --git a/viya4-deployment-darksite/baseline-to-ecr/ingress_nginx.sh b/viya4-deployment-darksite/baseline-to-ecr/ingress_nginx.sh index 08d2a5b6..3773861e 100644 --- a/viya4-deployment-darksite/baseline-to-ecr/ingress_nginx.sh +++ b/viya4-deployment-darksite/baseline-to-ecr/ingress_nginx.sh @@ -1,5 +1,8 @@ #!/bin/bash +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + source 00_vars.sh # determine chart version to use diff --git a/viya4-deployment-darksite/baseline-to-ecr/metrics_server.sh b/viya4-deployment-darksite/baseline-to-ecr/metrics_server.sh index c1e12ffb..b97f1f5c 100644 --- a/viya4-deployment-darksite/baseline-to-ecr/metrics_server.sh +++ b/viya4-deployment-darksite/baseline-to-ecr/metrics_server.sh @@ -1,5 +1,8 @@ #!/bin/bash +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + source 00_vars.sh echo "**** metrics-server ****" diff --git a/viya4-deployment-darksite/baseline-to-ecr/nfs_subdir_external_provisioner.sh b/viya4-deployment-darksite/baseline-to-ecr/nfs_subdir_external_provisioner.sh index 7c837ca9..c584a645 100644 --- a/viya4-deployment-darksite/baseline-to-ecr/nfs_subdir_external_provisioner.sh +++ b/viya4-deployment-darksite/baseline-to-ecr/nfs_subdir_external_provisioner.sh @@ -1,5 +1,8 @@ #!/bin/bash +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + source 00_vars.sh echo "**** nfs-subdir-external-provisioner ****" diff --git a/viya4-deployment-darksite/baseline-to-ecr/openldap.sh b/viya4-deployment-darksite/baseline-to-ecr/openldap.sh index bf96f5cb..d7341149 100644 --- a/viya4-deployment-darksite/baseline-to-ecr/openldap.sh +++ b/viya4-deployment-darksite/baseline-to-ecr/openldap.sh @@ -1,5 +1,8 @@ #!/bin/bash +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + source 00_vars.sh echo "**** openldap ****" diff --git a/viya4-deployment-darksite/darksite-openldap-mod/darksite-openldap-mod.sh b/viya4-deployment-darksite/darksite-openldap-mod/darksite-openldap-mod.sh index f10c9de2..cd87defe 100644 --- a/viya4-deployment-darksite/darksite-openldap-mod/darksite-openldap-mod.sh +++ b/viya4-deployment-darksite/darksite-openldap-mod/darksite-openldap-mod.sh @@ -1,5 +1,8 @@ #!/bin/bash +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + # helper script to easily mod viya4-deployment when using openldap in a darksite @@ -183,4 +186,4 @@ docker tag viya4-deployment:$TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/ aws ecr get-login-password --no-cli-pager --region $AWS_REGION | $DOCKER_SUDO docker login --username AWS --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com -docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/viya4-deployment:$TAG \ No newline at end of file +docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/viya4-deployment:$TAG diff --git a/viya4-deployment-darksite/deployment-machine-assets/01_iac_deploy.sh b/viya4-deployment-darksite/deployment-machine-assets/01_iac_deploy.sh index 142550ff..900c229e 100755 --- a/viya4-deployment-darksite/deployment-machine-assets/01_iac_deploy.sh +++ b/viya4-deployment-darksite/deployment-machine-assets/01_iac_deploy.sh @@ -1,5 +1,8 @@ #!/bin/bash +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + # what is the tag read -p "What is the tag for your viya4-iac-aws container? 
" -r TAG # what is the job @@ -48,4 +51,4 @@ if [ $REPLY == "destroy" ]; then viya4-iac-aws:$TAG \ destroy -auto-approve -var-file=/workspace/terraform.tfvars \ -state=/workspace/terraform.tfstate -fi \ No newline at end of file +fi diff --git a/viya4-deployment-darksite/deployment-machine-assets/02_dac_deploy.sh b/viya4-deployment-darksite/deployment-machine-assets/02_dac_deploy.sh index f432163a..eab6ae34 100755 --- a/viya4-deployment-darksite/deployment-machine-assets/02_dac_deploy.sh +++ b/viya4-deployment-darksite/deployment-machine-assets/02_dac_deploy.sh @@ -1,5 +1,8 @@ #!/bin/bash +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + # get viya4-deployment container tag echo -e "\n" read -p "What is your viya4-deployment container tag? " -r DOCKER_TAG diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/00_vars.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/00_vars.sh index 7cde5e20..7e72bca2 100644 --- a/viya4-deployment-darksite/install-baseline-helm-from-ecr/00_vars.sh +++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/00_vars.sh @@ -1,6 +1,9 @@ #!/bin/bash +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + AWS_ACCT_ID= AWS_REGION= -K8S_minor_version=24 # K8s v1.22.X minor would be 22 ... K8s v1.21.X minor version would be 21. This must match your deployment! \ No newline at end of file +K8S_minor_version=24 # K8s v1.22.X minor would be 22 ... K8s v1.21.X minor version would be 21. This must match your deployment! 
diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/auto_scaler_install.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/auto_scaler_install.sh index 99767e64..4227ce3b 100644 --- a/viya4-deployment-darksite/install-baseline-helm-from-ecr/auto_scaler_install.sh +++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/auto_scaler_install.sh @@ -1,5 +1,8 @@ #!/bin/bash +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + source 00_vars.sh # helm registry login @@ -41,4 +44,4 @@ helm upgrade --cleanup-on-fail \ # cleanup unset TMP_YAML -rm tmp.yaml \ No newline at end of file +rm tmp.yaml diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/cert_manager_install.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/cert_manager_install.sh index 1b850f96..79f87639 100644 --- a/viya4-deployment-darksite/install-baseline-helm-from-ecr/cert_manager_install.sh +++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/cert_manager_install.sh @@ -1,5 +1,8 @@ #!/bin/bash +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + source 00_vars.sh # helm registry login @@ -40,4 +43,4 @@ helm upgrade --cleanup-on-fail \ # cleanup unset TMP_YAML -rm tmp.yaml \ No newline at end of file +rm tmp.yaml diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/ebs-csi-driver.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/ebs-csi-driver.sh index 3bff5d90..e863682e 100644 --- a/viya4-deployment-darksite/install-baseline-helm-from-ecr/ebs-csi-driver.sh +++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/ebs-csi-driver.sh @@ -1,5 +1,8 @@ #!/bin/bash +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + ## installs ebs-csi-driver via helm source 00_vars.sh @@ -54,4 +57,4 @@ helm upgrade --cleanup-on-fail \ # cleanup unset TMP_YAML -rm tmp.yaml \ No newline at end of file +rm tmp.yaml diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/ingress_nginx_install.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/ingress_nginx_install.sh index f31fdcda..2f9efe51 100644 --- a/viya4-deployment-darksite/install-baseline-helm-from-ecr/ingress_nginx_install.sh +++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/ingress_nginx_install.sh @@ -1,5 +1,8 @@ #!/bin/bash +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + ## installs this by default: # - INGRESS_NGINX_CVE_2021_25742_PATCH # - ingress-nginx private ingress @@ -74,4 +77,4 @@ helm upgrade --cleanup-on-fail \ # cleanup unset TMP_YAML -rm tmp.yaml \ No newline at end of file +rm tmp.yaml diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/metrics_server_install.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/metrics_server_install.sh index 029290f9..5459ac63 100644 --- a/viya4-deployment-darksite/install-baseline-helm-from-ecr/metrics_server_install.sh +++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/metrics_server_install.sh @@ -1,5 +1,8 @@ #!/bin/bash +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + source 00_vars.sh # helm registry login @@ -20,4 +23,4 @@ helm upgrade --cleanup-on-fail \ # cleanup unset TMP_YAML -rm tmp.yaml \ No newline at end of file +rm tmp.yaml diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/nfs_provisioner_install.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/nfs_provisioner_install.sh index d9c3bcf1..10a0fcd8 100644 --- a/viya4-deployment-darksite/install-baseline-helm-from-ecr/nfs_provisioner_install.sh +++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/nfs_provisioner_install.sh @@ -1,5 +1,8 @@ #!/bin/bash +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + source 00_vars.sh # helm registry login @@ -41,4 +44,4 @@ helm upgrade --cleanup-on-fail \ # cleanup unset TMP_YAML -rm tmp.yaml \ No newline at end of file +rm tmp.yaml diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/pg_nfs_provisioner_install.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/pg_nfs_provisioner_install.sh index 0e704125..51a8115c 100644 --- a/viya4-deployment-darksite/install-baseline-helm-from-ecr/pg_nfs_provisioner_install.sh +++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/pg_nfs_provisioner_install.sh @@ -1,5 +1,8 @@ #!/bin/bash +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + source 00_vars.sh # helm registry login @@ -42,4 +45,4 @@ helm upgrade --cleanup-on-fail \ # cleanup unset TMP_YAML -rm tmp.yaml \ No newline at end of file +rm tmp.yaml diff --git a/viya4-deployment-darksite/mirrormgr-to-ecr/00_vars.sh b/viya4-deployment-darksite/mirrormgr-to-ecr/00_vars.sh index 57ade5eb..0eab8247 100644 --- a/viya4-deployment-darksite/mirrormgr-to-ecr/00_vars.sh +++ b/viya4-deployment-darksite/mirrormgr-to-ecr/00_vars.sh @@ -1,7 +1,10 @@ #!/bin/bash +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + NAMESPACE= # namespace used for your viya install AWS_ACCT_ID= # your aws account ID REGION= # your aws region CERTS=~/viya/software/viya_order_assets/SASViyaV4_XXXX_certs.zip # path to the _certs.zip file -ASSETS=~/viya/software/viya_order_assets/SASViyaV4_XXX_XXXX-XXXX_deploymentAssets.tgz # path to the tgz assets file \ No newline at end of file +ASSETS=~/viya/software/viya_order_assets/SASViyaV4_XXX_XXXX-XXXX_deploymentAssets.tgz # path to the tgz assets file diff --git a/viya4-deployment-darksite/mirrormgr-to-ecr/01_mirrormgr-ecr.sh b/viya4-deployment-darksite/mirrormgr-to-ecr/01_mirrormgr-ecr.sh index b5fcf3ff..16db9717 100644 --- a/viya4-deployment-darksite/mirrormgr-to-ecr/01_mirrormgr-ecr.sh +++ b/viya4-deployment-darksite/mirrormgr-to-ecr/01_mirrormgr-ecr.sh @@ -1,5 +1,8 @@ #!/bin/bash +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + ## mirrormgr must be installed and in $PATH prior to running this script ## aws cli should be configured prior to running this script ## place your downloaded assets in the assets/ folder @@ -32,4 +35,4 @@ if [[ $REPLY =~ ^[Yy]$ ]]; then --destination https://$AWS_ACCT_ID.dkr.ecr.$REGION.amazonaws.com/$NAMESPACE \ --username 'AWS' \ --password $(aws ecr get-login-password --region $REGION) -fi \ No newline at end of file +fi diff --git a/viya4-deployment-darksite/mirrormgr-to-ecr/02_cleanup-ecr.sh b/viya4-deployment-darksite/mirrormgr-to-ecr/02_cleanup-ecr.sh index f628fb57..d8f659ea 100644 --- a/viya4-deployment-darksite/mirrormgr-to-ecr/02_cleanup-ecr.sh +++ b/viya4-deployment-darksite/mirrormgr-to-ecr/02_cleanup-ecr.sh @@ -1,5 +1,8 @@ #!/bin/bash +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + # this script will help you quickly clean up viya related ECR repos ### source variables from 00_vars.sh