diff --git a/.PSScriptAnalyzerSettings.psd1 b/.PSScriptAnalyzerSettings.psd1 deleted file mode 100644 index fa147b6842..0000000000 --- a/.PSScriptAnalyzerSettings.psd1 +++ /dev/null @@ -1,19 +0,0 @@ -@{ - Severity = "Information" - ExcludeRules = @( - # Ignore error when using 'ConvertTo-SecureString' with '-AsPlainText' - # and a string argument rather than an encrypted string - "PSAvoidUsingConvertToSecureStringWithPlainText", - # Ignore error when a function has both 'Username' and 'Password' - # arguments - "PSAvoidUsingUsernameAndPasswordParams", - # Ignore DSC errors as we want to phase out opaque DSC calls - "PSDSCDscTestsPresent", - "PSDSCDscExamplesPresent", - "PSDSCUseVerboseMessageInDSCResource", - # TODO: stop ignoring these - "PSAvoidUsingPositionalParameters", - "PSProvideCommentHelp", - "PSUseOutputTypeCorrectly" - ) -} diff --git a/.PSScriptFormatterSettings.psd1 b/.PSScriptFormatterSettings.psd1 deleted file mode 100644 index 2fa2a5d48f..0000000000 --- a/.PSScriptFormatterSettings.psd1 +++ /dev/null @@ -1,48 +0,0 @@ -@{ - IncludeRules = @( - "PSAlignAssignmentStatement", - "PSPlaceCloseBrace", - "PSPlaceOpenBrace", - "PSUseConsistentIndentation", - "PSUseConsistentWhitespace", - "PSUseCorrectCasing" - ) - Rules = @{ - PSAlignAssignmentStatement = @{ - Enable = $true - CheckHashtable = $true - } - PSPlaceCloseBrace = @{ - Enable = $true - NewLineAfter = $false - IgnoreOneLineBlock = $true - NoEmptyLineBefore = $false - } - PSPlaceOpenBrace = @{ - Enable = $true - OnSameLine = $true - NewLineAfter = $true - IgnoreOneLineBlock = $true - } - PSUseConsistentIndentation = @{ - Enable = $false - Kind = "space" - PipelineIndentation = "IncreaseIndentationForFirstPipeline" - IndentationSize = 4 - } - PSUseConsistentWhitespace = @{ - Enable = $true - CheckInnerBrace = $true - CheckOpenBrace = $true - CheckOpenParen = $true - CheckOperator = $true - CheckPipe = $true - CheckPipeForRedundantWhitespace = $true - CheckSeparator = $true - CheckParameter = $true - } - PSUseCorrectCasing = @{ - Enable = $true - } - } -} \ No newline at end of file diff --git a/.all-contributorsrc b/.all-contributorsrc index 6fd0c5a733..54a6a36e65 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -590,6 +590,25 @@ "bug", "review" ] + }, + { + "login": "J0shev", + "name": "Josh Everett", + "avatar_url": "https://avatars.githubusercontent.com/u/17052866?v=4", + "profile": "https://github.com/J0shev", + "contributions": [ + "bug" + ] + }, + { + "login": "dsj976", + "name": "David Salvador Jasin", + "avatar_url": "https://avatars.githubusercontent.com/u/57944311?v=4", + "profile": "https://github.com/dsj976", + "contributions": [ + "bug", + "doc" + ] } ], "contributorsSortAlphabetically": true, diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 192052cf08..b2ac959fe6 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,50 +1,42 @@ -ARG VARIANT=3.10 -FROM python:${VARIANT}-buster +ARG VARIANT=3.12 +ARG DISTRIBUTION="bookworm" +FROM python:${VARIANT}-${DISTRIBUTION} -RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ - && apt-get -y install --no-install-recommends apt-transport-https \ - ca-certificates \ - curl \ - git \ - gnupg \ - libssl-dev \ - lsb-release \ - python3-sphinx \ - sudo - -# Set package versions -ARG AZURE_CLI_VERSION="2.58.0" -ARG PWSH_VERSION="7.4.3" +RUN apt-get update \ + && export DEBIAN_FRONTEND=noninteractive \ + && apt-get -y install --no-install-recommends \ + git \ + libssl-dev \ + python3-sphinx # Install Azure-CLI -# 
Get Microsoft signing key -RUN sudo mkdir -p /etc/apt/keyrings \ - && curl -sLS https://packages.microsoft.com/keys/microsoft.asc | \ - gpg --dearmor | \ - sudo tee /etc/apt/keyrings/microsoft.gpg > /dev/null \ - && sudo chmod go+r /etc/apt/keyrings/microsoft.gpg \ - && AZ_DIST=$(lsb_release -cs) \ - && echo "deb [arch=`dpkg --print-architecture` signed-by=/etc/apt/keyrings/microsoft.gpg] https://packages.microsoft.com/repos/azure-cli/ $AZ_DIST main" | \ - sudo tee /etc/apt/sources.list.d/azure-cli.list \ - && sudo apt-get update \ - && sudo apt-get install azure-cli=$AZURE_CLI_VERSION-1~$AZ_DIST +RUN apt-get update \ + && apt-get -y install --no-install-recommends \ + apt-transport-https \ + ca-certificates \ + curl \ + gnupg \ + lsb-release +RUN mkdir -p /etc/apt/keyrings \ + && curl -sLS https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor -o /etc/apt/keyrings/microsoft.gpg \ + && chmod go+r /etc/apt/keyrings/microsoft.gpg -# Install Powershell -# Pull different binaries from Github depending on system architecture -# The standard APT method currently only works for `amd64` -# Set up TARGETARCH variable to use to pull the right binaries for the current architecture. -ARG TARGETARCH -RUN if [ "${TARGETARCH}" = "arm64" ]; \ - then \ - DEBARCH="arm64"; \ - else \ - DEBARCH="x86"; \ - fi; \ - curl -L -o /tmp/powershell.tar.gz https://github.com/PowerShell/PowerShell/releases/download/v${PWSH_VERSION}/powershell-${PWSH_VERSION}-linux-$DEBARCH.tar.gz \ - && mkdir -p /opt/microsoft/powershell/7 \ - && tar zxf /tmp/powershell.tar.gz -C /opt/microsoft/powershell/7 \ - && chmod +x /opt/microsoft/powershell/7/pwsh \ - && ln -s /opt/microsoft/powershell/7/pwsh /usr/bin/pwsh +# Set package versions +ARG AZURE_CLI_VERSION="2.59.0" +ARG DISTRIBUTION + +# Add the Azure CLI repository +RUN cat < /etc/apt/sources.list.d/azure-cli.sources +Types: deb +URIs: https://packages.microsoft.com/repos/azure-cli/ +Suites: $(lsb_release -cs) +Components: main +Architectures: $(dpkg --print-architecture) +Signed-by: /etc/apt/keyrings/microsoft.gpg +EOF + +RUN apt-get update \ + && apt-get -y install azure-cli=${AZURE_CLI_VERSION}-1~${DISTRIBUTION} # Create non-root user and give them sudo access ARG USERNAME=deploydsh @@ -64,9 +56,8 @@ USER $USERNAME COPY ./docs/requirements.txt /build/requirements.txt RUN pip3 install -r /build/requirements.txt -# Install/check needed powershell modules -COPY ./deployment/CheckRequirements.ps1 /build/CheckRequirements.ps1 -COPY ./deployment/common/Logging.psm1 /build/common/Logging.psm1 -RUN pwsh -Command "& {Set-PSRepository -Name PSGallery -InstallationPolicy Trusted}" \ - && pwsh -File /build/CheckRequirements.ps1 -InstallMissing -IncludeDev\ - && sudo rm -rf /build/ +# Set PATH for pulumi - pulumi installed as feature to work round installing as root +ENV PATH=$PATH:/home/${USERNAME}/.pulumi/bin + +# Install hatch to build the dsh cli tool +RUN pip3 install hatch \ No newline at end of file diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 7ecddb18f8..f8e49feed9 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,21 +1,24 @@ -// Development container for the Data Safe Haven project +// Development container for the Data Safe Haven project pulumi { - "name": "Turing Data Safe Haven", - "build": { - "context": "..", - "dockerfile": "Dockerfile" - }, - "customizations": { - "vscode": { - "settings": { - "terminal.integrated.defaultProfile.linux": "bash" - }, - "extensions": [ - 
"github.vscode-pull-request-github", - "ms-vscode.powershell", - "ms-vscode.azure-account" - ] - } - }, - "remoteUser": "deploydsh" + "name": "Turing Data Safe Haven", + "build": { + "context": "..", + "dockerfile": "Dockerfile" + }, + "settings": { + "terminal.integrated.defaultProfile.linux": "bash" + }, + "extensions": [ + "github.vscode-pull-request-github", + "ms-vscode.azure-account", + "ms-python.python", + "christian-kohler.path-intellisense" + ], + "remoteUser": "deploydsh", + "features": { + "ghcr.io/devcontainers-contrib/features/pulumi:1": { + "version": "latest", + "bashCompletion": true + } + } } diff --git a/.flake8 b/.flake8 deleted file mode 100644 index ce0f6aa24a..0000000000 --- a/.flake8 +++ /dev/null @@ -1,15 +0,0 @@ -[flake8] -# Default ignored error codes (rules not enforced by PEP8) -# E121 - continuation line under-indented for hanging indent -# E123 - closing bracket does not match indentation of opening bracket’s line -# E126 - continuation line over-indented for hanging indent -# E226 - missing whitespace around arithmetic operator -# E241 - multiple spaces after ‘,’ -# E242 - tab after ‘,’ -# E704 - multiple statements on one line (def) -# W503 - line break before binary operator -# W504 - line break after binary operator -# -# Extra ignored error codes -# E501 - line too long -ignore = E121, E123, E126, E226, E24, E704, E501, W503, W504 diff --git a/.github/ISSUE_TEMPLATE/deployment_bug_report.md b/.github/ISSUE_TEMPLATE/deployment_bug_report.md index e7ab97b1ad..6cf453cc13 100644 --- a/.github/ISSUE_TEMPLATE/deployment_bug_report.md +++ b/.github/ISSUE_TEMPLATE/deployment_bug_report.md @@ -2,7 +2,7 @@ name: Deployment bug report about: Report a bug that occurs during deployment title: "" -labels: "type: deployment bug" +labels: "bug" assignees: "" --- @@ -20,12 +20,24 @@ Before reporting a problem please check the following. Replace the empty checkbo ## :computer: System information -- **Operating System:** -- **Data Safe Haven version:** +- **Operating System:** +- **Data Safe Haven version:** -## :cactus: Powershell module versions +## :package: Packages - +
+List of packages
+
+
+
+```none
+Paste list of packages here
+```
+
+
## :no_entry_sign: Describe the problem diff --git a/.github/ISSUE_TEMPLATE/documentation_improvement.md b/.github/ISSUE_TEMPLATE/documentation_improvement.md index d6f053ba92..daa1dd0f6a 100644 --- a/.github/ISSUE_TEMPLATE/documentation_improvement.md +++ b/.github/ISSUE_TEMPLATE/documentation_improvement.md @@ -2,7 +2,7 @@ name: Documentation improvement about: Request changes to Data Safe Haven documentation title: "" -labels: "type: documentation" +labels: "documentation" assignees: "" --- diff --git a/.github/ISSUE_TEMPLATE/enhancement_request.md b/.github/ISSUE_TEMPLATE/enhancement_request.md index 826d23cc5a..dda8a80777 100644 --- a/.github/ISSUE_TEMPLATE/enhancement_request.md +++ b/.github/ISSUE_TEMPLATE/enhancement_request.md @@ -2,7 +2,7 @@ name: Enhancement request about: Request new features or upgrades to existing features title: "" -labels: "type: enhancement" +labels: "enhancement" assignees: "" --- diff --git a/.github/ISSUE_TEMPLATE/release_checklist.md b/.github/ISSUE_TEMPLATE/release_checklist.md index acca08b093..a25064faa1 100644 --- a/.github/ISSUE_TEMPLATE/release_checklist.md +++ b/.github/ISSUE_TEMPLATE/release_checklist.md @@ -2,7 +2,7 @@ name: Release checklist about: Log completion of required actions for release testing title: "Release: " -labels: "type: release-candidate" +labels: "release candidate" assignees: "" --- @@ -25,11 +25,11 @@ Refer to the [Deployment](https://data-safe-haven.readthedocs.io/en/latest/deplo ### For minor releases and above - [ ] Deploy an SHM from this branch and save a transcript of the deployment logs -- [ ] Build an SRE compute image (SRD) and save transcripts of the logs - Using the new image, deploy a tier 2 and a tier 3 SRE - [ ] Save the transcript of your tier 2 SRE deployment - [ ] Save the transcript of your tier 3 SRE deployment - [ ] Complete the [Security evaluation checklist](https://data-safe-haven.readthedocs.io/en/latest/deployment/security_checklist.html) from the deployment documentation +- [ ] Add the new versions tag as an active build on [Read The Docs](https://readthedocs.org) (You can add as a hidden build, before release, to preview) ### For major releases only diff --git a/.github/ISSUE_TEMPLATE/software_package_request.md b/.github/ISSUE_TEMPLATE/software_package_request.md deleted file mode 100644 index 2afdb8b800..0000000000 --- a/.github/ISSUE_TEMPLATE/software_package_request.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -name: Software package request -about: Request that a new package is added to the Tier 3 allow list -title: "" -labels: "type: enhancement" -assignees: "" ---- - -## :white_check_mark: Checklist - - - -- [ ] I have searched open and closed issues for duplicates. -- [ ] This is a request for a new software package to be added to the Data Safe Haven -- [ ] The package is still missing in the [latest version](https://github.com/alan-turing-institute/data-safe-haven/releases). - -## :gift: Package details - - - -## :steam_locomotive: Why is this needed? 
- - diff --git a/.github/ISSUE_TEMPLATE/system_manager_functionality_bug_report.md b/.github/ISSUE_TEMPLATE/system_manager_functionality_bug_report.md index 9a989f4f06..35317c57ea 100644 --- a/.github/ISSUE_TEMPLATE/system_manager_functionality_bug_report.md +++ b/.github/ISSUE_TEMPLATE/system_manager_functionality_bug_report.md @@ -2,7 +2,7 @@ name: System manager functionality bug report about: Report a bug that occurs when managing a Data Safe Haven title: "" -labels: "type: functionality bug" +labels: "bug" assignees: "" --- @@ -20,17 +20,36 @@ Before reporting a problem please check the following. Replace the empty checkbo ## :computer: System information -- **Data Safe Haven version:** -- **Operating system details:** +- **Operating System:** +- **Data Safe Haven version:** -## :cactus: Powershell module versions +## :package: Packages - +
+List of packages
+
+
+
+```none
+Paste list of packages here
+```
+
+
## :no_entry_sign: Describe the problem ## :steam_locomotive: Workarounds or solutions diff --git a/.github/ISSUE_TEMPLATE/user_functionality_bug_report.md b/.github/ISSUE_TEMPLATE/user_functionality_bug_report.md index 7cf77f1e7f..40422a4f5b 100644 --- a/.github/ISSUE_TEMPLATE/user_functionality_bug_report.md +++ b/.github/ISSUE_TEMPLATE/user_functionality_bug_report.md @@ -2,7 +2,7 @@ name: End-user functionality bug report about: Report a bug that occurs when you are using the Data Safe Haven title: "" -labels: "type: functionality bug" +labels: "bug" assignees: "" --- @@ -20,14 +20,21 @@ Before reporting a problem please check the following. Replace the empty checkbo ## :computer: System information -- **Data Safe Haven version:** +- **Operating System:** +- **Data Safe Haven version:** - **Browser details:** -- **Operating system details:** ## :no_entry_sign: Describe the problem ## :steam_locomotive: Workarounds or solutions diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..732ed0c6b7 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,9 @@ +--- + +version: 2 +updates: + # GitHub Action update PRs + - package-ecosystem: "github-actions" + directory: "/" # The exact logic is unclear, but it recursively searches at least .github/workflows/ + schedule: + interval: "weekly" diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 1b2533d089..93c5ebb970 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -3,15 +3,20 @@ - [ ] You have given your pull request a meaningful title (_e.g._ `Enable foobar integration` rather than `515 foobar`). - [ ] You are targeting the appropriate branch. If you're not certain which one this is, it should be **`develop`**. - [ ] Your branch is up-to-date with the **target branch** (it probably was when you started, but it may have changed since then). -- [ ] You have marked this pull request as a **draft** and added `'[WIP]'` to the title if needed (if you're not yet ready to merge). -- [ ] You have formatted your code using appropriate automated tools (for example `./tests/AutoFormat_Powershell.ps1 -TargetPath ` for Powershell). + +### :vertical_traffic_light: Depends on + + ### :arrow_heading_up: Summary diff --git a/.github/scripts/requirements.txt b/.github/scripts/requirements.txt index 5e493cd1b2..4380b52b37 100644 --- a/.github/scripts/requirements.txt +++ b/.github/scripts/requirements.txt @@ -1,3 +1,4 @@ lxml natsort +packaging requests \ No newline at end of file diff --git a/.github/scripts/update_docker_versions.py b/.github/scripts/update_docker_versions.py new file mode 100644 index 0000000000..879b62bbb9 --- /dev/null +++ b/.github/scripts/update_docker_versions.py @@ -0,0 +1,96 @@ +#! 
/usr/bin/env python3 + +import pathlib +import re +from contextlib import suppress + +import requests +from packaging import version + + +def get_dockerhub_versions(image_details: str) -> tuple[str, str, list[str]]: + """Get versions for DockerHub images (via API)""" + image_name, version = image_details.split(":") + if "/" in image_name: + namespace, image_name = image_name.split("/") + else: + namespace = "library" + response = requests.get( + f"https://registry.hub.docker.com/v2/repositories/{namespace}/{image_name}/tags?page_size=1000", + timeout=60, + ) + versions = [result["name"] for result in response.json()["results"]] + return (image_name, version, versions) + + +def get_github_versions(image_details: str) -> tuple[str, str, list[str]]: + """Get versions for GitHub images (via manual scraping)""" + _, organisation, image_name, version = re.split("[:/]", image_details) + response = requests.get( + f"https://github.com/{organisation}/{image_name}/pkgs/container/{image_name}/versions", + timeout=60, + ) + versions = [ + re.search(r"tag=([^\"]+)\"", line).group(1) + for line in response.content.decode("utf-8").split() + if "tag=" in line + ] + return (image_name, version, versions) + + +def get_quayio_versions(image_details: str) -> tuple[str, str, list[str]]: + """Get versions for Quay.IO images (via API)""" + _, organisation, image_name, version = re.split("[:/]", image_details) + response = requests.get( + f"https://quay.io/api/v1/repository/{organisation}/{image_name}?includeTags=true", + timeout=60, + ) + versions = list(response.json()["tags"].keys()) + return (image_name, version, versions) + + +def annotate( + versions: list[str], *, stable_only: bool +) -> list[tuple[str, version.Version]]: + """Annotate a list of potential version strings with the parsed version""" + annotated = [] + for version_str in versions: + with suppress(version.InvalidVersion): + version_ = version.parse(version_str) + if stable_only and ( + version_.is_devrelease + or version_.is_prerelease + or version_.is_postrelease + ): + continue + annotated.append((version_str, version_)) + return annotated + + +for filename in (pathlib.Path("data_safe_haven") / "infrastructure").glob("**/*.py"): + needs_replacement = False + lines = [] + with open(filename) as f_pulumi: + for line in f_pulumi: + output = line + if re.search(r".*image=.*", line): + image_details = line.split('"')[1] + if image_details.startswith("ghcr.io"): + image, v_current, available = get_github_versions(image_details) + elif image_details.startswith("quay.io"): + image, v_current, available = get_quayio_versions(image_details) + else: + image, v_current, available = get_dockerhub_versions(image_details) + stable_versions = [v for v in annotate(available, stable_only=True)] + v_latest = sorted(stable_versions, key=lambda v: v[1], reverse=True)[0][0] + if v_current != v_latest: + print(f"Updating {image} from {v_current} to {v_latest} in {filename}") # noqa: T201 + needs_replacement = True + output = line.replace(v_current, v_latest) + else: + print(f"Leaving {image} at {v_current} (latest version) in {filename}") # noqa: T201 + lines += output + + if needs_replacement: + with open(filename, "w") as f_pulumi: + f_pulumi.writelines(lines) diff --git a/.github/scripts/update_python_dependencies.sh b/.github/scripts/update_python_dependencies.sh new file mode 100755 index 0000000000..b7134ea54d --- /dev/null +++ b/.github/scripts/update_python_dependencies.sh @@ -0,0 +1,23 @@ +#! 
/usr/bin/env sh +set -e + +# Check for required arguments +if [ "$#" -ne 2 ]; then + echo "Usage: update_python_dependencies [environment_name] [target]" + exit 1 +fi +ENV_NAME=$1 +TARGET=$2 + +# Check for pip-compile +if ! command -v pip-compile > /dev/null; then + echo "pip-compile could not be found" + exit 1 +fi + +# Run pip-compile +if [ "$ENV_NAME" = "default" ]; then + pip-compile -U pyproject.toml -c requirements-constraints.txt -o "$TARGET" +else + hatch env show --json | jq -r ".${ENV_NAME}.dependencies | .[]" | pip-compile - -U -c requirements-constraints.txt -o "$TARGET" +fi diff --git a/.github/security_checklist_template.md b/.github/security_checklist_template.md index cbdade3596..b963331eef 100644 --- a/.github/security_checklist_template.md +++ b/.github/security_checklist_template.md @@ -2,7 +2,7 @@ Running on SHM/SREs deployed using commit XXXXXXX ## Summary -+ :white_check_mark:N tests passed ++ :white_check_mark: N tests passed - :partly_sunny: N tests partially passed (see below for more details) - :fast_forward: N tests skipped (see below for more details) - :x: N tests failed (see below for more details) @@ -11,7 +11,6 @@ Running on SHM/SREs deployed using commit XXXXXXX Some security checks were skipped since: - No managed device was available - No access to a physical space with its own dedicated network was possible -- No access to a secure email service was available ### Multifactor Authentication and Password strength + :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Check that the SRE standard user cannot access the apps @@ -117,7 +116,7 @@ Some security checks were skipped since: ### Data ingress + :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **System administrator:** secure upload token successfully created with write-only permissions -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **System administrator:** with no access to a secure email server the token was sent by unencrypted email ++ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **System administrator:** token was sent using a secure, out-of-band communication channel (e.g. 
secure email) + :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **Data Provider:** uploading a file from an allow-listed IP address succeeds + :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **Data Provider:** downloading a file from an allow-listed IP address fails + :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **Data Provider:** uploading a file from an non-allowed IP address fails diff --git a/.github/workflows/build_allow_lists.yaml b/.github/workflows/build_allow_lists.yaml deleted file mode 100644 index 45bd2b714c..0000000000 --- a/.github/workflows/build_allow_lists.yaml +++ /dev/null @@ -1,80 +0,0 @@ ---- -name: Build allow lists - -# Run workflow on pushes to matching branches -on: # yamllint disable-line rule:truthy - push: - branches: [develop] - schedule: - - cron: "0 0 */6 * *" # run every six days in order to keep the cache fresh - workflow_dispatch: # allow this workflow to be manually triggered - -# checkout needs 'contents:read' -# pull request needs 'pull-requests:write' and 'contents:write' -permissions: - contents: write - pull-requests: write - -env: - TIMEOUT_REACHED: 0 - -jobs: - build_allow_lists: - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@v3 - - - name: Cache dependencies - uses: actions/cache@v3 - with: - path: environment_configs/package_lists/dependency-cache.json - key: dependencies-${{ github.sha }} # request a cache that does not yet exist - restore-keys: dependencies- # use prefix matching to fallback to the most recently created SHA-specific cache - - - name: Check PyPI allow list - shell: pwsh - env: - LIBRARIES_IO_API_KEY: ${{ secrets.LIBRARIES_IO_API_KEY }} - run: ./deployment/administration/SHM_Expand_Allowlist_Dependencies.ps1 -Repository pypi -TimeoutMinutes 60 -ApiKey "$env:LIBRARIES_IO_API_KEY" # this will set TIMEOUT_REACHED if the timeout is reached - - - name: Check CRAN allow list - shell: pwsh - env: - LIBRARIES_IO_API_KEY: ${{ secrets.LIBRARIES_IO_API_KEY }} - run: ./deployment/administration/SHM_Expand_Allowlist_Dependencies.ps1 -Repository cran -TimeoutMinutes 240 -ApiKey "$env:LIBRARIES_IO_API_KEY" # this will set TIMEOUT_REACHED if the timeout is reached - - - name: Check for changes - shell: bash - run: git --no-pager diff -- . ':!environment_configs/package_lists/dependency-cache.json' - - - name: Get current date - id: date - run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT - - - name: Create pull request - if: ${{ (env.TIMEOUT_REACHED == 0) && (! 
env.ACT) }} - id: pull-request - uses: peter-evans/create-pull-request@38e0b6e68b4c852a5500a94740f0e535e0d7ba54 # This commit corresponds to tag 4.2.4 - with: - commit-message: Update PyPI and CRAN allow lists - committer: GitHub Actions - author: ${{ github.actor }} <${{ github.actor }}@users.noreply.github.com> - base: develop - branch: package-allowlist-updates - delete-branch: true - title: Update PyPI and CRAN allow lists - body: | - ### :arrow_heading_up: Summary - - Apply package allowlist diff from ${{ github.sha }} on ${{ steps.date.outputs.date }} - - ### :closed_umbrella: Related issues - None - - ### :microscope: Tests - Allow-list only - labels: | - affected: developers - severity: minor - type: enhancement - draft: false diff --git a/.github/workflows/build_documentation.yaml b/.github/workflows/build_documentation.yaml new file mode 100644 index 0000000000..8243cfdc97 --- /dev/null +++ b/.github/workflows/build_documentation.yaml @@ -0,0 +1,52 @@ +--- +name: Build documentation + +# Run workflow on pushes to matching branches +on: # yamllint disable-line rule:truthy + push: + branches: [develop, latest] + pull_request: + +jobs: + build: + name: Build + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: 3.12 + + - name: Install hatch + run: pip install hatch + + - name: Build documentation + run: hatch run docs:build + + check_links: + name: Check links + runs-on: ubuntu-latest + needs: build + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: 3.12 + + - name: Install hatch + run: pip install hatch + + - name: Build documentation + run: hatch run docs:build + + - name: Link Checker + uses: lycheeverse/lychee-action@v1.10.0 + with: + args: --config='./.lychee.toml' --no-progress './docs/build/html/**/*.html' + fail: true # fail on broken links diff --git a/.github/workflows/citation.yaml b/.github/workflows/citation.yaml index c33128840c..ce29866276 100644 --- a/.github/workflows/citation.yaml +++ b/.github/workflows/citation.yaml @@ -10,7 +10,7 @@ jobs: name: "validate" runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Validate CITATION.cff uses: citation-file-format/cffconvert-github-action@2.0.0 diff --git a/.github/workflows/documentation.yaml b/.github/workflows/documentation.yaml deleted file mode 100644 index 2c8bf6e2b5..0000000000 --- a/.github/workflows/documentation.yaml +++ /dev/null @@ -1,57 +0,0 @@ ---- -name: Documentation - -# Run workflow on pushes to matching branches -on: # yamllint disable-line rule:truthy - push: - branches: [develop, latest] - pull_request: - branches: [develop, latest] - -jobs: - build: - name: Build - runs-on: ubuntu-latest - defaults: - run: - working-directory: ./docs/ - steps: - - name: Checkout code - uses: actions/checkout@v3 - - name: Setup Python - uses: actions/setup-python@v4 - with: - python-version: 3.11 - - name: Install dependencies - run: | - pip install -r requirements.txt - - name: Sphinx build - run: | - make html SPHINXOPTS="-W" - - check_links: - name: Check links - runs-on: ubuntu-latest - needs: build - defaults: - run: - working-directory: ./docs/ - steps: - - name: Checkout code - uses: actions/checkout@v3 - - name: Setup Python - uses: actions/setup-python@v4 - with: - python-version: 3.11 - - name: Install dependencies - run: | - pip install -r requirements.txt 
- - name: Sphinx build - # Set warnings to be treated as errors - run: | - make html SPHINXOPTS="-W" - - name: Link Checker - uses: lycheeverse/lychee-action@v1.7.0 - with: - args: --config='./.lychee.toml' --no-progress './docs/build/html/**/*.html' - fail: true # fail on broken links diff --git a/.github/workflows/lint_code.yaml b/.github/workflows/lint_code.yaml index 8292e81515..1a37a3e7d9 100644 --- a/.github/workflows/lint_code.yaml +++ b/.github/workflows/lint_code.yaml @@ -6,14 +6,34 @@ on: # yamllint disable-line rule:truthy push: branches: [develop, latest] pull_request: - branches: [develop, latest] jobs: + lint_caddy: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Install caddy + shell: bash + run: | + sudo mkdir -m 0755 -p /usr/share/keyrings/ + sudo wget 'https://dl.cloudsmith.io/public/caddy/stable/gpg.key' -O - | sudo gpg --dearmor -o /usr/share/keyrings/caddy-stable-archive-keyring.gpg + sudo wget 'https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt' -O /etc/apt/sources.list.d/caddy-stable.list + sudo apt update + sudo apt install caddy + - name: Lint Caddy + shell: bash + run: | + # Check Caddyfile formatting + find . -name Caddyfile -print0 | xargs -0 -n 1 caddy fmt > /dev/null + # Check Caddyfile validity + find . -name Caddyfile -print0 | xargs -0 -n 1 -I {} caddy validate --config "{}" + lint_json: runs-on: ubuntu-latest steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Install requirements shell: bash run: npm install -g jsonlint mustache @@ -27,44 +47,36 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Install requirements shell: bash run: sudo gem install mdl - name: Lint Markdown run: mdl --style .mdlstyle.rb . - lint_powershell: - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@v3 - - name: Install requirements - shell: pwsh - run: | - Set-PSRepository PSGallery -InstallationPolicy Trusted - Install-Module PSScriptAnalyzer - - name: Lint PowerShell - shell: pwsh - run: Invoke-ScriptAnalyzer -Path . -Settings .PSScriptAnalyzerSettings.psd1 -Recurse -EnableExit -ReportSummary - lint_python: runs-on: ubuntu-latest steps: - name: Checkout code - uses: actions/checkout@v3 - - name: Install requirements - shell: bash - run: pip install flake8 + uses: actions/checkout@v4 + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: 3.12 + - name: Install hatch + run: pip install hatch + - name: Print package versions + run: | + hatch run lint:ruff --version + hatch run lint:black --version - name: Lint Python - shell: bash - run: flake8 . 
--statistics --count + run: hatch run lint:all lint_shell: runs-on: ubuntu-latest steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Install requirements shell: bash run: sudo apt install shellcheck @@ -76,7 +88,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Install requirements shell: bash run: | @@ -92,7 +104,7 @@ jobs: done rm expanded.tmp - name: Lint YAML - uses: karancode/yamllint-github-action@dd59165b84d90d37fc919c3c7dd84c7e37cd6bfb # this is 2.0.0 + uses: karancode/yamllint-github-action@v2.1.1 with: yamllint_strict: true yamllint_comment: false diff --git a/.github/workflows/test_code.yaml b/.github/workflows/test_code.yaml index dde3ef0a1a..52bce85ef6 100644 --- a/.github/workflows/test_code.yaml +++ b/.github/workflows/test_code.yaml @@ -1,38 +1,61 @@ --- name: Test code -# Run workflow on pushes to matching branches +# Run workflow on PRs and pushes to matching branches on: # yamllint disable-line rule:truthy push: branches: [develop, latest] pull_request: - branches: [develop, latest] jobs: - test_powershell: + test_python: runs-on: ubuntu-latest + permissions: + # Gives the action the necessary permissions for publishing new + # comments in pull requests. + pull-requests: write + # Gives the action the necessary permissions for pushing data to the + # python-coverage-comment-action branch, and for editing existing + # comments (to avoid publishing multiple comments in the same PR) + contents: write steps: - name: Checkout code - uses: actions/checkout@v3 - - name: Install requirements - shell: pwsh - run: | - Set-PSRepository PSGallery -InstallationPolicy Trusted - deployment/CheckRequirements.ps1 -InstallMissing -IncludeDev - - name: Test PowerShell - shell: pwsh - run: ./tests/Run_Pester_Tests.ps1 + uses: actions/checkout@v4 + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: 3.12 + - name: Install hatch + run: pip install hatch + - name: Test Python + run: hatch run test:test-coverage + # For security reasons, PRs created from forks cannot generate PR comments directly + # (see https://securitylab.github.com/research/github-actions-preventing-pwn-requests/). + # Instead we need to trigger another workflow after this one completes. 
+ - name: Generate coverage comment + id: coverage_comment + uses: py-cov-action/python-coverage-comment-action@v3 + with: + GITHUB_TOKEN: ${{ github.token }} + # Save the coverage comment for later use + # See https://github.com/py-cov-action/python-coverage-comment-action/blob/main/README.md + - name: Save coverage comment as an artifact + uses: actions/upload-artifact@v4 + if: steps.coverage_comment.outputs.COMMENT_FILE_WRITTEN == 'true' + with: + name: python-coverage-comment-action + path: python-coverage-comment-action.txt test_markdown_links: runs-on: ubuntu-latest steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Install requirements shell: bash run: npm install -g markdown-link-check - name: Link Checker - uses: lycheeverse/lychee-action@v1.7.0 + uses: lycheeverse/lychee-action@v1.10.0 with: args: --config='./.lychee.toml' --no-progress --offline '**/*.md' --exclude-path './docs' fail: true # fail on broken links diff --git a/.github/workflows/test_coverage.yaml b/.github/workflows/test_coverage.yaml new file mode 100644 index 0000000000..82bed5a021 --- /dev/null +++ b/.github/workflows/test_coverage.yaml @@ -0,0 +1,32 @@ +--- +name: Test code - post coverage comment + +# Run workflow after test_code has completed +on: # yamllint disable-line rule:truthy + workflow_run: + workflows: ["Test code"] + types: + - completed + +jobs: + coverage: + runs-on: ubuntu-latest + if: github.event.workflow_run.event == 'pull_request' && github.event.workflow_run.conclusion == 'success' + permissions: + # Gives the action the necessary permissions for publishing new + # comments in pull requests. + pull-requests: write + # Gives the action the necessary permissions for editing existing + # comments (to avoid publishing multiple comments in the same PR) + contents: write + # Gives the action the necessary permissions for looking up the + # workflow that launched this workflow, and download the related + # artifact that contains the comment to be published + actions: read + steps: + # Post the pre-generated coverage comment + - name: Post coverage comment + uses: py-cov-action/python-coverage-comment-action@v3 + with: + GITHUB_TOKEN: ${{ github.token }} + GITHUB_PR_RUN_ID: ${{ github.event.workflow_run.id }} diff --git a/.github/workflows/update_package_versions.yaml b/.github/workflows/update_docker_versions.yaml similarity index 52% rename from .github/workflows/update_package_versions.yaml rename to .github/workflows/update_docker_versions.yaml index 7ef89355a7..6c26183892 100644 --- a/.github/workflows/update_package_versions.yaml +++ b/.github/workflows/update_docker_versions.yaml @@ -1,12 +1,9 @@ --- -name: Update package versions +name: Update Docker image versions -# Run workflow on pushes to matching branches on: # yamllint disable-line rule:truthy - push: - branches: [develop] schedule: - - cron: "0 0 */7 * *" # run once per week + - cron: "0 3 * * 1" # run at 3:00 every Monday workflow_dispatch: # allow this workflow to be manually triggered # checkout needs 'contents:read' @@ -16,65 +13,55 @@ permissions: pull-requests: write jobs: - update_package_versions: + update_docker_versions: runs-on: ubuntu-latest steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: - python-version: '3.10' + python-version: '3.12' - name: Install Python packages run: | pip install --upgrade pip pip install -r .github/scripts/requirements.txt - - 
name: Update Azure Data Studio version + - name: Update Docker image versions run: | - python .github/scripts/update_azure_data_studio.py - - - name: Update RStudio version - run: | - python .github/scripts/update_rstudio.py - - - name: Update DBeaver driver versions - run: | - python .github/scripts/update_dbeaver_drivers.py - - - name: Check for changes - shell: bash - run: git --no-pager diff -- . + python .github/scripts/update_docker_versions.py - name: Get current date id: date - run: echo "::set-output name=date::$(date +'%Y-%m-%d')" + run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT - name: Create pull request if: ${{ ! env.ACT }} id: pull-request - uses: peter-evans/create-pull-request@153407881ec5c347639a548ade7d8ad1d6740e38 # This commit corresponds to tag 5.0.2 + uses: peter-evans/create-pull-request@v6.1.0 with: - commit-message: Update SRD package versions - committer: GitHub Actions author: ${{ github.actor }} <${{ github.actor }}@users.noreply.github.com> base: develop - branch: srd-package-versions - delete-branch: true - title: Update SRD package versions body: | + :warning: In order for CI to run on this PR it needs to be manually closed and re-opened :warning: + ### :arrow_heading_up: Summary - - Apply package version diff from ${{ github.sha }} on ${{ steps.date.outputs.date }} + - Update Docker image versions from ${{ github.sha }} on ${{ steps.date.outputs.date }} ### :closed_umbrella: Related issues None ### :microscope: Tests Package versions only + branch: update-docker-images + commit-message: ":arrow_up: Update Pulumi Docker images" + committer: GitHub Actions + delete-branch: true + draft: false labels: | affected: developers severity: minor type: enhancement - draft: false + title: ":arrow_up: Update Pulumi Docker images" diff --git a/.github/workflows/update_python_dependencies.yaml b/.github/workflows/update_python_dependencies.yaml new file mode 100644 index 0000000000..7342b6a309 --- /dev/null +++ b/.github/workflows/update_python_dependencies.yaml @@ -0,0 +1,67 @@ +--- +name: Update Python dependencies + +# Run workflow on pushes to matching branches +on: # yamllint disable-line rule:truthy + schedule: + - cron: "0 3 * * 1" # run at 3:00 every Monday + workflow_dispatch: # allow this workflow to be manually triggered + + +jobs: + update_python_dependencies: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: 3.12 + + - name: Install dependencies + run: pip install hatch pip-tools + + - name: Update 'default' dependencies + run: .github/scripts/update_python_dependencies.sh default requirements.txt + + - name: Update 'docs' dependencies + run: .github/scripts/update_python_dependencies.sh docs docs/requirements.txt + + - name: Check for changes + shell: bash + run: git --no-pager diff -- . + + - name: Get current date + id: date + run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT + + - name: Create pull request + if: ${{ ! 
env.ACT }} + id: pull-request + uses: peter-evans/create-pull-request@v6.1.0 + with: + author: ${{ github.actor }} <${{ github.actor }}@users.noreply.github.com> + base: develop + body: | + :warning: In order for CI to run on this PR it needs to be manually closed and re-opened :warning: + + ### :arrow_heading_up: Summary + - Update Python dependencies from ${{ github.sha }} on ${{ steps.date.outputs.date }} + + ### :closed_umbrella: Related issues + None + + ### :microscope: Tests + Package versions only + branch: python-dependencies + commit-message: ":arrow_up: Update Python dependencies" + committer: GitHub Actions + delete-branch: true + draft: false + labels: | + affected: developers + severity: minor + type: enhancement + title: ":arrow_up: Update Python dependencies" diff --git a/.gitignore b/.gitignore index 39e3f1e981..15607a9be1 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ # System files *.DS_Store +__pycache__ # All secrets folders, no matter how nested secrets/ @@ -13,6 +14,7 @@ environment_configs/package_lists/dependency-cache.json # Python build caches __pycache__/ +.venv/ # Development tools .vscode @@ -35,6 +37,16 @@ docs/venv # Files produced during testing .mustache_config.json expanded.yaml +.coverage # Lychee cache .lycheecache + +# mypy cache +.mypy_cache + +# ruff cache +.pytest_cache + +# ruff cache +.ruff_cache diff --git a/.lychee.toml b/.lychee.toml index bd475ee7b3..904e92a58f 100644 --- a/.lychee.toml +++ b/.lychee.toml @@ -90,11 +90,13 @@ glob_ignore_case = false # Exclude URLs and mail addresses from checking (supports regex). # exclude = [ '.*\.github.com\.*' ] exclude = [ + 'doi\.org', # 403 + 'entra.microsoft\.com', # Requires authentication (403) 'github\.com', # Requires authentication (403) - 'turing\.ac\.uk', # DDOS protection 'ipaddressguide\.com', # 403 'opensource\.org', # 403 'portal\.azure\.com', # 403 + 'turing\.ac\.uk', # DDOS protection 'web\.archive\.org', # DDOS protection ] diff --git a/.yamllint.yaml b/.yamllint.yaml index 630e1c48d9..1053d55fe4 100644 --- a/.yamllint.yaml +++ b/.yamllint.yaml @@ -1,11 +1,25 @@ --- +# Some rules here are added for compatibility with ansible-lint +# https://ansible.readthedocs.io/projects/lint/rules/yaml/ extends: default rules: + # Restrict acceptable spaces inside brackets + braces: + min-spaces-inside: 0 + max-spaces-inside: 1 # Remove requirement for comments to start with '# ' as this breaks cloud-init comments: + min-spaces-from-content: 1 require-starting-space: false + # Don't force comments to be indented like content + comments-indentation: false # Remove requirement to start with '---' as this breaks cloud-init document-start: disable # Allow longer lines line-length: max: 400 + # Forbid non-quoted octal values + # https://yamllint.readthedocs.io/en/stable/rules.html#module-yamllint.rules.octal_values + octal-values: + forbid-implicit-octal: true + forbid-explicit-octal: true diff --git a/CODEOWNERS b/CODEOWNERS index 98c252fa96..2d21f1caee 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -10,5 +10,7 @@ # We only plan to require code owner review for # main and other branches that may be deployed from. 
# Note: /dir/ applies to directory and all subdirectories -/deployment/ @martintoreilly @jemrobinson @JimMadge @craddm -/docs/ @martintoreilly @jemrobinson @JimMadge @craddm + +* @alan-turing-institute/data-safe-haven-code-administrators + +/docs/ @alan-turing-institute/data-safe-haven-code-contributors diff --git a/README.md b/README.md index 62d01329cf..ea48d1bb19 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ It was developed as part of the Alan Turing Institute's [Data Safe Havens in the ![Licence](https://img.shields.io/github/license/alan-turing-institute/data-safe-haven) [![Citation](https://img.shields.io/badge/citation-cite%20this%20project-informational)](https://github.com/alan-turing-institute/data-safe-haven/blob/develop/CITATION.cff) -[![All Contributors](https://img.shields.io/badge/all_contributors-47-orange.svg?style=flat-square)](#contributors-) +[![All Contributors](https://img.shields.io/badge/all_contributors-49-orange.svg?style=flat-square)](#contributors-) ## :family: Community & support @@ -53,50 +53,52 @@ See our [Code of Conduct](CODE_OF_CONDUCT.md) and our [Contributor Guide](CONTRI David Beavan
David Beavan: 📖 🖋
+David Salvador Jasin: 🐛 📖
Diego Arenas: 💻 🤔 🖋
Ed Chalstrey: 💻 📖 🐛 🤔 📋 👀 ⚠️
Evelina Gabasova: 🖋
Federico Nanni: 💻 🐛 📖 🤔
Franz Király: 🖋
Guillaume Noell: 📖 🐛 🤔
Helen D Little: 🐛 👀
Helen Sherwood-Taylor: 🤔 🖋
Jack Roberts: 💻 🐛
James Cunningham: 💻 📖 🐛 🤔 🖋
James Geddes: 🖋
James Hetherington: 📖 🐛 🤔 🔍 📆 📣 📢 🖋
James Robinson: 💻 🖋 📖 🐛 🤔 🔍 📋 🚇 📆 📣 💬 👀 🛡️ ⚠️ 📢
Jim Madge: 💻 📖 🐛 🤔 🔍 📋 🚇 📆 📣 💬 👀 🛡️ ⚠️
+Josh Everett: 🐛
Jules M: 📖 🤔 🐛 🖋
Kirstie Whitaker: 🖋 📖 🐛 🤔 🔍 📋 📆 📣 📢 📓
Martin O'Reilly: 💻 🖋 📖 🐛 🤔 🔍 📋 🚇 📆 📣 💬 👀 🛡️ ⚠️ 📢
Matt Craddock: 💻 📖 🐛 🤔 🔍 📋 📣 💬 👀 🛡️ ⚠️
Oscar T Giles: 💻 📖 🤔
Radka Jersakova: 🖋
Rob Clarke: 🤔 🐛 💻 📖 🖋
Steven Carlysle-Davies: 💻 🖋 🤔
Tim Hobson: 💻 🐛 📖 🤔
Tom Doel: 💻 📖 🐛 🤔 🖋
Tomas Lazauskas: 💻 📖 🐛 🤔
arielle-bennett: 🔍 🤔 📆
bw-faststream: 📖 🤔 🐛 📆 📓
cathiest: 🖋 📖 🐛 🤔
davsarper: 📖 📋 🔍 🤔 📆 📣 📢
ens-brett-todd: 💻 🤔
ens-george-holmes: 💻 🤔
getcarter21: 💻 🖋
harisood: 📖 🐛 🤔 🔍 📋 📆 📣 💬 📢 🛡️ 📓
kevinxufs: 📖 🤔 🛡️
miguelmorin: 💻 📖 🤔 ⚠️
oforrest: 📖 🤔 📆 📣 🖋
rwinstanley1:
📖 🤔 📆 🛡️ diff --git a/ROADMAP.md b/ROADMAP.md index 970243817a..a95c1f6d1e 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -1,6 +1,6 @@ # Roadmap -Last updated: 2024-03-12 +Last updated: 2024-07-08 ## Background @@ -67,22 +67,6 @@ This may be because they are lower priority than short-term goals or because the These are features we would like to highlight but which we have actively decided not to incorporate into the long- or short-term roadmap. This may be because, for example, we have decided we have insufficient resource, are lacking the right expertise or it is low priority. -### Develop Outside, Run Inside - -A workflow enabling researchers to develop this research and analysis code outside of the TRE, using whatever tools they prefer and are familiar with, and bring this work into the TRE to run against the sensitive data. - -This would improve, - -- Reproducibility (as users would be encouraged and enabled to use public VCS repositories) -- User experience -- User efficiency -- Accuracy of results (as testing code is easier) - -This would involve, - -- A trusted mechanism to bring code into the TRE -- Synthetic or dummy data for testing outside of the TRE - ### User Defined Container Support Enable users to bring containers they have developed outside of the TRE into the TRE to be used in research/analysis. diff --git a/SECURITY.md b/SECURITY.md index 0f6602279d..c045852320 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -7,8 +7,8 @@ All organisations using an earlier version in production should update to the la | Version | Supported | | --------------------------------------------------------------------------------------- | ------------------ | -| [4.2.2](https://github.com/alan-turing-institute/data-safe-haven/releases/tag/v4.2.2) | :white_check_mark: | -| < 4.2.2 | :x: | +| [5.0.0](https://github.com/alan-turing-institute/data-safe-haven/releases/tag/v5.0.0) | :white_check_mark: | +| < 5.0.0 | :x: | ## Reporting a Vulnerability diff --git a/VERSIONING.md b/VERSIONING.md index 41801b06b5..03d9caa972 100644 --- a/VERSIONING.md +++ b/VERSIONING.md @@ -91,8 +91,9 @@ The following versions have been evaluated by third party security consultants p | [v3.3.0](https://github.com/alan-turing-institute/data-safe-haven/releases/tag/v3.3.0) | 15 July 2021 | Penetration test evaluating (1) external attack surface, (2) ability to exfiltrate data from the system, (3) ability to transfer data between SREs, (4) ability to escalate privileges on the SRD. | No major security issues identified. | | [v3.4.0](https://github.com/alan-turing-institute/data-safe-haven/releases/tag/v3.4.0) | 22 April 2022 | Penetration test evaluating (1) external attack surface, (2) ability to exfiltrate data from the system, (3) ability to transfer data between SREs, (4) ability to escalate privileges on the SRD. | No major security issues identified. | | [v4.0.0](https://github.com/alan-turing-institute/data-safe-haven/releases/tag/v4.0.0) | 2 September 2022 | Penetration test evaluating ability to infiltrate/exfiltrate data from the system. | No major security issues identified. | -| [v5.0.0-rc1](https://github.com/alan-turing-institute/data-safe-haven/releases/tag/v5.0.0-rc.1) | 18 September 2023 | Penetration test evaluating ability to infiltrate/exfiltrate data from the system. Testing next codebase, using Python and Pulumi. | No major security issues identified. 
| +| [v5.0.0-rc1](https://github.com/alan-turing-institute/data-safe-haven/releases/tag/v5.0.0-rc.1) | 18 September 2023 | Penetration test evaluating ability to infiltrate/exfiltrate data from the system. Testing next codebase, using Python and Pulumi. | No major security issues identified. | | [v4.2.0](https://github.com/alan-turing-institute/data-safe-haven/releases/tag/v4.2.0) | 22 March 2024 | Penetration test evaluating ability to infiltrate/exfiltrate data from the system. Repeat tests for v4.0.0 vulnerabilities. | No major security issues identified. | +| [v5.0.0](https://github.com/alan-turing-institute/data-safe-haven/releases/tag/v5.0.0) | 9 August 2024 | Penetration test evaluating ability to infiltrate/exfiltrate data from the system. | | ## Questions diff --git a/data_safe_haven/__init__.py b/data_safe_haven/__init__.py new file mode 100644 index 0000000000..b13a7ce9ad --- /dev/null +++ b/data_safe_haven/__init__.py @@ -0,0 +1,8 @@ +"""Data Safe Haven""" + +from .logging import init_logging +from .version import __version__, __version_info__ + +init_logging() + +__all__ = ["__version__", "__version_info__"] diff --git a/environment_configs/package_lists/allowlist-extra-python-pypi-tier3.list b/data_safe_haven/administration/__init__.py similarity index 100% rename from environment_configs/package_lists/allowlist-extra-python-pypi-tier3.list rename to data_safe_haven/administration/__init__.py diff --git a/data_safe_haven/administration/users/__init__.py b/data_safe_haven/administration/users/__init__.py new file mode 100644 index 0000000000..257384e46b --- /dev/null +++ b/data_safe_haven/administration/users/__init__.py @@ -0,0 +1,3 @@ +from .user_handler import UserHandler + +__all__ = ["UserHandler"] diff --git a/data_safe_haven/administration/users/entra_users.py b/data_safe_haven/administration/users/entra_users.py new file mode 100644 index 0000000000..8872589190 --- /dev/null +++ b/data_safe_haven/administration/users/entra_users.py @@ -0,0 +1,166 @@ +"""Interact with users in Entra ID.""" + +from collections.abc import Sequence + +from data_safe_haven.exceptions import ( + DataSafeHavenEntraIDError, + DataSafeHavenError, + DataSafeHavenTypeError, +) +from data_safe_haven.external import GraphApi +from data_safe_haven.functions import password +from data_safe_haven.logging import get_logger + +from .research_user import ResearchUser + + +class EntraUsers: + """Interact with users in Entra ID.""" + + def __init__( + self, + graph_api: GraphApi, + ) -> None: + self.graph_api = graph_api + self.logger = get_logger() + + def add(self, new_users: Sequence[ResearchUser]) -> None: + """ + Add list of users to Entra ID + + Raises: + DataSafeHavenEntraIDError if any user could not be created + """ + try: + available_domains = { + domain["id"] + for domain in self.graph_api.read_domains() + if domain["isVerified"] + } + for user in new_users: + if user.domain not in available_domains: + msg = f"Domain '[green]{user.domain}[/]' is not verified." + raise DataSafeHavenTypeError(msg) + request_json = { + "accountEnabled": user.account_enabled, + "displayName": user.display_name, + "givenName": user.given_name, + "surname": user.surname, + "mailNickname": user.username, + "passwordProfile": {"password": password(20)}, + "userPrincipalName": f"{user.username}@{user.domain}", + } + if not user.email_address: + msg = ( + f"User '[green]{user.username}[/]' is missing an email address." 
+ ) + raise DataSafeHavenTypeError(msg) + if not user.phone_number: + msg = f"User '[green]{user.username}[/]' is missing a phone number." + raise DataSafeHavenTypeError(msg) + self.graph_api.create_user( + request_json, user.email_address, user.phone_number + ) + self.logger.info( + f"Ensured user '[green]{user.preferred_username}[/]' exists in Entra ID" + ) + except DataSafeHavenError as exc: + msg = "Unable to add users to Entra ID." + raise DataSafeHavenEntraIDError(msg) from exc + + def list(self) -> Sequence[ResearchUser]: + """ + List available Entra users + + Raises: + DataSafeHavenEntraIDError if users could not be loaded + """ + try: + user_list = self.graph_api.read_users() + return [ + ResearchUser( + account_enabled=user_details["accountEnabled"], + email_address=user_details["mail"], + given_name=user_details["givenName"], + phone_number=( + user_details["businessPhones"][0] + if len(user_details["businessPhones"]) + else None + ), + sam_account_name=( + user_details["onPremisesSamAccountName"] + if user_details["onPremisesSamAccountName"] + else user_details["mailNickname"] + ), + surname=user_details["surname"], + user_principal_name=user_details["userPrincipalName"], + ) + for user_details in user_list + ] + except DataSafeHavenError as exc: + msg = "Unable to list Entra ID users." + raise DataSafeHavenEntraIDError(msg) from exc + + def register(self, sre_name: str, usernames: Sequence[str]) -> None: + """ + Add usernames to SRE group in Entra ID + + Raises: + DataSafeHavenEntraIDError if any user could not be added to the group. + """ + try: + group_name = f"Data Safe Haven SRE {sre_name} Users" + for username in usernames: + self.graph_api.add_user_to_group(username, group_name) + except DataSafeHavenError as exc: + msg = f"Unable to add users to group '{group_name}'." + raise DataSafeHavenEntraIDError(msg) from exc + + def remove(self, users: Sequence[ResearchUser]) -> None: + """ + Remove list of users from Entra ID + + Raises: + DataSafeHavenEntraIDError if any user could not be removed. + """ + try: + for user in filter( + lambda existing_user: any(existing_user == user for user in users), + self.list(), + ): + self.graph_api.remove_user(user.username) + self.logger.info(f"Removed '{user.preferred_username}'.") + except DataSafeHavenError as exc: + msg = "Unable to remove users from Entra ID." + raise DataSafeHavenEntraIDError(msg) from exc + + def set(self, users: Sequence[ResearchUser]) -> None: + """ + Set Entra users to specified list + + Raises: + DataSafeHavenEntraIDError if user list could not be set + """ + try: + users_to_remove = [user for user in self.list() if user not in users] + self.remove(users_to_remove) + users_to_add = [user for user in users if user not in self.list()] + self.add(users_to_add) + except DataSafeHavenError as exc: + msg = "Unable to set desired user list in Entra ID." + raise DataSafeHavenEntraIDError(msg) from exc + + def unregister(self, sre_name: str, usernames: Sequence[str]) -> None: + """ + Remove usernames from SRE group in Entra ID + + Raises: + DataSafeHavenEntraIDError if any user could not be added to the group. + """ + try: + group_name = f"Data Safe Haven SRE {sre_name}" + for username in usernames: + self.graph_api.remove_user_from_group(username, group_name) + except DataSafeHavenError as exc: + msg = f"Unable to remove users from group {group_name}." 
+ raise DataSafeHavenEntraIDError(msg) from exc diff --git a/data_safe_haven/administration/users/guacamole_users.py b/data_safe_haven/administration/users/guacamole_users.py new file mode 100644 index 0000000000..8c0c5381a0 --- /dev/null +++ b/data_safe_haven/administration/users/guacamole_users.py @@ -0,0 +1,66 @@ +import pathlib +from collections.abc import Sequence + +from data_safe_haven.config import Context, DSHPulumiConfig, SREConfig +from data_safe_haven.external import AzurePostgreSQLDatabase, AzureSdk +from data_safe_haven.infrastructure import SREProjectManager + +from .research_user import ResearchUser + + +class GuacamoleUsers: + """Interact with users in a Guacamole database.""" + + def __init__( + self, + context: Context, + config: SREConfig, + pulumi_config: DSHPulumiConfig, + ): + sre_stack = SREProjectManager( + context=context, + config=config, + pulumi_config=pulumi_config, + ) + # Read the SRE database secret from key vault + azure_sdk = AzureSdk(context.subscription_name) + connection_db_server_password = azure_sdk.get_keyvault_secret( + sre_stack.output("data")["key_vault_name"], + sre_stack.output("data")["password_user_database_admin_secret"], + ) + self.postgres_provisioner = AzurePostgreSQLDatabase( + sre_stack.output("remote_desktop")["connection_db_name"], + connection_db_server_password, + sre_stack.output("remote_desktop")["connection_db_server_name"], + sre_stack.output("remote_desktop")["resource_group_name"], + context.subscription_name, + ) + self.users_: Sequence[ResearchUser] | None = None + self.postgres_script_path: pathlib.Path = ( + pathlib.Path(__file__).parent.parent.parent + / "resources" + / "remote_desktop" + / "postgresql" + ) + self.group_name = f"Data Safe Haven SRE {config.name} Users" + + def list(self) -> Sequence[ResearchUser]: + """List all Guacamole users""" + if self.users_ is None: # Allow for the possibility of an empty list of users + postgres_output = self.postgres_provisioner.execute_scripts( + [self.postgres_script_path / "list_users.mustache.sql"], + mustache_values={"group_name": self.group_name}, + ) + # The output is of the form [ + # ["sam_account_name1", "email_address1"], + # ["sam_account_name2", "email_address2"] + # ] + self.users_ = [ + ResearchUser( + sam_account_name=user_details[0].split("@")[0], + user_principal_name=user_details[0], + email_address=user_details[1], + ) + for user_details in postgres_output + ] + return self.users_ diff --git a/data_safe_haven/administration/users/research_user.py b/data_safe_haven/administration/users/research_user.py new file mode 100644 index 0000000000..0c7d8dd630 --- /dev/null +++ b/data_safe_haven/administration/users/research_user.py @@ -0,0 +1,54 @@ +from typing import Any + + +class ResearchUser: + def __init__( + self, + account_enabled: bool | None = None, + country: str | None = None, + domain: str | None = None, + email_address: str | None = None, + given_name: str | None = None, + phone_number: str | None = None, + sam_account_name: str | None = None, + surname: str | None = None, + user_principal_name: str | None = None, + ) -> None: + self.account_enabled = account_enabled + self.country = country + self.domain = domain + self.email_address = email_address + self.given_name = given_name + self.phone_number = phone_number + self.sam_account_name = sam_account_name + self.surname = surname + self.user_principal_name = user_principal_name + + @property + def display_name(self) -> str: + return f"{self.given_name} {self.surname}" + + @property + def 
preferred_username(self) -> str: + if self.user_principal_name: + return self.user_principal_name + return self.username + + @property + def username(self) -> str: + if self.sam_account_name: + return self.sam_account_name + return f"{self.given_name}.{self.surname}".lower() + + def __eq__(self, other: Any) -> bool: + if isinstance(other, ResearchUser): + return any( + [ + self.username == other.username, + self.preferred_username == other.preferred_username, + ] + ) + return False + + def __str__(self) -> str: + return f"{self.display_name} '{self.username}'." diff --git a/data_safe_haven/administration/users/user_handler.py b/data_safe_haven/administration/users/user_handler.py new file mode 100644 index 0000000000..ac42247e59 --- /dev/null +++ b/data_safe_haven/administration/users/user_handler.py @@ -0,0 +1,219 @@ +import csv +import pathlib +from collections.abc import Sequence + +from data_safe_haven import console +from data_safe_haven.config import Context, DSHPulumiConfig, SREConfig +from data_safe_haven.exceptions import DataSafeHavenUserHandlingError +from data_safe_haven.external import GraphApi +from data_safe_haven.logging import get_logger + +from .entra_users import EntraUsers +from .guacamole_users import GuacamoleUsers +from .research_user import ResearchUser + + +class UserHandler: + def __init__( + self, + context: Context, + graph_api: GraphApi, + ): + self.entra_users = EntraUsers(graph_api) + self.context = context + self.logger = get_logger() + + def add(self, users_csv_path: pathlib.Path, domain: str) -> None: + """Add users to Entra ID and Guacamole + + Raises: + DataSafeHavenUserHandlingError if the users could not be added + """ + try: + # Construct user list + with open(users_csv_path, encoding="utf-8") as f_csv: + dialect = csv.Sniffer().sniff(f_csv.read(), delimiters=";,") + f_csv.seek(0) + reader = csv.DictReader(f_csv, dialect=dialect) + for required_field in [ + "GivenName", + "Surname", + "Phone", + "Email", + "CountryCode", + ]: + if (not reader.fieldnames) or ( + required_field not in reader.fieldnames + ): + msg = f"Missing required CSV field '{required_field}'." + raise ValueError(msg) + users = [ + ResearchUser( + account_enabled=True, + country=user["CountryCode"], + domain=user.get("Domain", domain), + email_address=user["Email"], + given_name=user["GivenName"], + phone_number=user["Phone"], + surname=user["Surname"], + ) + for user in reader + ] + for user in users: + self.logger.debug(f"Processing new user: {user}") + + # Add users to Entra ID + self.entra_users.add(users) + except csv.Error as exc: + msg = f"Could not add users from '{users_csv_path}'." 
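+ # Convert CSV parsing failures into the common user-handling error type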
+ raise DataSafeHavenUserHandlingError(msg) from exc + + def get_usernames( + self, sre_name: str, pulumi_config: DSHPulumiConfig + ) -> dict[str, list[str]]: + """Load usernames from all sources""" + usernames = {} + usernames["Entra ID"] = self.get_usernames_entra_id() + usernames[f"SRE {sre_name}"] = self.get_usernames_guacamole( + sre_name, + pulumi_config, + ) + return usernames + + def get_usernames_entra_id(self) -> list[str]: + """Load usernames from Entra ID""" + return [user.username for user in self.entra_users.list()] + + def get_usernames_guacamole( + self, sre_name: str, pulumi_config: DSHPulumiConfig + ) -> list[str]: + """Lazy-load usernames from Guacamole""" + try: + sre_config = SREConfig.from_remote_by_name(self.context, sre_name) + guacamole_users = GuacamoleUsers(self.context, sre_config, pulumi_config) + return [user.username for user in guacamole_users.list()] + except Exception: + self.logger.error(f"Could not load users for SRE '{sre_name}'.") + return [] + + def list(self, sre_name: str, pulumi_config: DSHPulumiConfig) -> None: + """List Entra ID and Guacamole users + + Raises: + DataSafeHavenUserHandlingError if the users could not be listed + """ + try: + # Load usernames + usernames = self.get_usernames(sre_name, pulumi_config) + # Fill user information as a table + user_headers = ["username", *list(usernames.keys())] + user_data = [] + for username in sorted( + {name for names in usernames.values() for name in names} + ): + user_memberships = [username] + for category in user_headers[1:]: + user_memberships.append( + "x" if username in usernames[category] else "" + ) + user_data.append(user_memberships) + + console.tabulate(user_headers, user_data) + except Exception as exc: + msg = "Could not list users." + raise DataSafeHavenUserHandlingError(msg) from exc + + def register(self, sre_name: str, user_names: Sequence[str]) -> None: + """Register usernames with SRE + + Raises: + DataSafeHavenUserHandlingError if the users could not be registered in the SRE + """ + try: + # Add users to the SRE security group + self.entra_users.register(sre_name, user_names) + except Exception as exc: + msg = f"Could not register {len(user_names)} users with SRE '{sre_name}'." + raise DataSafeHavenUserHandlingError(msg) from exc + + def remove(self, user_names: Sequence[str]) -> None: + """Remove Entra ID and Guacamole users + + Raises: + DataSafeHavenUserHandlingError if the users could not be removed + """ + try: + # Construct user lists + self.logger.debug(f"Attempting to remove {len(user_names)} user(s).") + entra_users_to_remove = [ + user for user in self.entra_users.list() if user.username in user_names + ] + + # Commit changes + self.logger.debug( + f"Found {len(entra_users_to_remove)} valid user(s) to remove." + ) + self.entra_users.remove(entra_users_to_remove) + except Exception as exc: + msg = f"Could not remove users: {user_names}." + raise DataSafeHavenUserHandlingError(msg) from exc + + def set(self, users_csv_path: str) -> None: + """Set Entra ID and Guacamole users + + Raises: + DataSafeHavenUserHandlingError if the users could not be set to the desired list + """ + try: + # Construct user list + with open(users_csv_path, encoding="utf-8") as f_csv: + reader = csv.DictReader(f_csv) + for required_field in ["GivenName", "Surname", "Phone", "Email"]: + if (not reader.fieldnames) or ( + required_field not in reader.fieldnames + ): + msg = f"Missing required CSV field '{required_field}'." 
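+ # The enclosing try block re-raises this as DataSafeHavenUserHandlingError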
+ raise ValueError(msg) + desired_users = [ + ResearchUser( + country="GB", + email_address=user["Email"], + given_name=user["GivenName"], + phone_number=user["Phone"], + surname=user["Surname"], + ) + for user in reader + ] + for user in desired_users: + self.logger.debug(f"Processing user: {user}") + + # Keep existing users with the same username + entra_desired_users = [ + user + for user in self.entra_users.list() + if user.username in [u.username for u in desired_users] + ] + + # Construct list of new users + entra_desired_users = [ + user for user in desired_users if user not in entra_desired_users + ] + + # Commit changes + self.entra_users.set(entra_desired_users) + except Exception as exc: + msg = f"Could not set users from '{users_csv_path}'." + raise DataSafeHavenUserHandlingError(msg) from exc + + def unregister(self, sre_name: str, user_names: Sequence[str]) -> None: + """Unregister usernames with SRE + + Raises: + DataSafeHavenUserHandlingError if the users could not be registered in the SRE + """ + try: + # Remove users from the SRE security group + self.entra_users.unregister(sre_name, user_names) + except Exception as exc: + msg = f"Could not unregister {len(user_names)} users with SRE '{sre_name}'." + raise DataSafeHavenUserHandlingError(msg) from exc diff --git a/data_safe_haven/commands/__init__.py b/data_safe_haven/commands/__init__.py new file mode 100644 index 0000000000..edc9b8f778 --- /dev/null +++ b/data_safe_haven/commands/__init__.py @@ -0,0 +1,3 @@ +from .cli import application + +__all__ = ["application"] diff --git a/data_safe_haven/commands/cli.py b/data_safe_haven/commands/cli.py new file mode 100644 index 0000000000..29c6dcd0f7 --- /dev/null +++ b/data_safe_haven/commands/cli.py @@ -0,0 +1,99 @@ +"""Command line entrypoint for Data Safe Haven application""" + +from typing import Annotated, Optional + +import typer + +from data_safe_haven import __version__, console +from data_safe_haven.logging import set_console_level, show_console_level + +from .config import config_command_group +from .context import context_command_group +from .pulumi import pulumi_command_group +from .shm import shm_command_group +from .sre import sre_command_group +from .users import users_command_group + +# Create the application +application = typer.Typer( + context_settings={"help_option_names": ["-h", "--help"]}, + invoke_without_command=True, + name="dsh", + no_args_is_help=True, +) + + +# Custom application callback +# This is executed before +@application.callback() +def callback( + verbose: Annotated[ # noqa: FBT002 + bool, + typer.Option( + "--verbose", + "-v", + help="Increase the verbosity of console output.", + ), + ] = False, + show_level: Annotated[ # noqa: FBT002 + bool, + typer.Option( + "--show-level", + "-l", + help="Show Log level.", + ), + ] = False, + version: Annotated[ + Optional[bool], # noqa: UP007 + typer.Option( + "--version", "-V", help="Display the version of this application." 
+ ), + ] = None, +) -> None: + """Arguments to the main executable""" + + if verbose: + set_console_level("DEBUG") + + if show_level: + show_console_level() + + if version: + console.print(f"Data Safe Haven {__version__}") + raise typer.Exit() + + +# Register command groups +application.add_typer( + users_command_group, + name="users", + help="Manage the users of a Data Safe Haven deployment.", +) +application.add_typer( + config_command_group, + name="config", + help="Manage Data Safe Haven configuration.", +) +application.add_typer( + context_command_group, name="context", help="Manage Data Safe Haven contexts." +) +application.add_typer( + pulumi_command_group, + name="pulumi", + help="(Advanced) interact directly with the Pulumi CLI.", +) +application.add_typer( + shm_command_group, + name="shm", + help="Manage Data Safe Haven SHM infrastructure.", +) +application.add_typer( + sre_command_group, + name="sre", + help="Manage Data Safe Haven SRE infrastructure.", +) + + +def main() -> None: + """Run the application""" + application() diff --git a/data_safe_haven/commands/config.py b/data_safe_haven/commands/config.py new file mode 100644 index 0000000000..ac6ba92dc1 --- /dev/null +++ b/data_safe_haven/commands/config.py @@ -0,0 +1,148 @@ +"""Command group and entrypoints for managing DSH configuration""" + +from pathlib import Path +from typing import Annotated, Optional + +import typer + +from data_safe_haven import console +from data_safe_haven.config import ContextManager, SHMConfig, SREConfig +from data_safe_haven.exceptions import ( + DataSafeHavenAzureStorageError, + DataSafeHavenConfigError, + DataSafeHavenError, +) +from data_safe_haven.logging import get_logger + +config_command_group = typer.Typer() + + +# Commands related to an SHM +@config_command_group.command() +def show_shm( + file: Annotated[ + Optional[Path], # noqa: UP007 + typer.Option(help="File path to write configuration template to."), + ] = None +) -> None: + """Print the SHM configuration for the selected Data Safe Haven context""" + logger = get_logger() + try: + context = ContextManager.from_file().assert_context() + except DataSafeHavenConfigError as exc: + logger.critical( + "No context is selected. Use `dsh context add` to create a context " + "or `dsh context switch` to select one." + ) + raise typer.Exit(1) from exc + try: + config = SHMConfig.from_remote(context) + except DataSafeHavenError as exc: + logger.critical( + "SHM must be deployed before its configuration can be displayed." + ) + raise typer.Exit(1) from exc + config_yaml = config.to_yaml() + if file: + with open(file, "w") as outfile: + outfile.write(config_yaml) + else: + console.print(config_yaml) + + +# Commands related to an SRE +@config_command_group.command() +def show( + name: Annotated[str, typer.Argument(help="Name of SRE to show")], + file: Annotated[ + Optional[Path], # noqa: UP007 + typer.Option(help="File path to write configuration template to."), + ] = None, +) -> None: + """Print the SRE configuration for the selected SRE and Data Safe Haven context""" + logger = get_logger() + try: + context = ContextManager.from_file().assert_context() + except DataSafeHavenConfigError as exc: + logger.critical( + "No context is selected. Use `dsh context add` to create a context " + "or `dsh context switch` to select one." 
+ ) + raise typer.Exit(1) from exc + try: + sre_config = SREConfig.from_remote_by_name(context, name) + except DataSafeHavenAzureStorageError as exc: + logger.critical("Ensure SHM is deployed before attempting to use SRE configs.") + raise typer.Exit(1) from exc + except DataSafeHavenError as exc: + logger.critical( + f"No configuration exists for an SRE named '{name}' for the selected context." + ) + raise typer.Exit(1) from exc + config_yaml = sre_config.to_yaml() + if file: + with open(file, "w") as outfile: + outfile.write(config_yaml) + else: + console.print(config_yaml) + + +@config_command_group.command() +def template( + file: Annotated[ + Optional[Path], # noqa: UP007 + typer.Option(help="File path to write configuration template to."), + ] = None +) -> None: + """Write a template Data Safe Haven SRE configuration.""" + sre_config = SREConfig.template() + # The template uses explanatory strings in place of the expected types. + # Serialisation warnings are therefore suppressed to avoid misleading the users into + # thinking there is a problem and contaminating the output. + config_yaml = sre_config.to_yaml(warnings=False) + if file: + with open(file, "w") as outfile: + outfile.write(config_yaml) + else: + console.print(config_yaml) + + +@config_command_group.command() +def upload( + file: Annotated[Path, typer.Argument(help="Path to configuration file")], +) -> None: + """Upload an SRE configuration to the Data Safe Haven context""" + context = ContextManager.from_file().assert_context() + logger = get_logger() + + # Create configuration object from file + if file.is_file(): + with open(file) as config_file: + config_yaml = config_file.read() + else: + logger.critical(f"Configuration file '{file}' not found.") + raise typer.Exit(1) + config = SREConfig.from_yaml(config_yaml) + + # Present diff to user + if SREConfig.remote_exists(context, filename=config.filename): + if diff := config.remote_yaml_diff(context, filename=config.filename): + for line in "".join(diff).splitlines(): + logger.info(line) + if not console.confirm( + ( + "Configuration has changed, " + "do you want to overwrite the remote configuration?" + ), + default_to_yes=False, + ): + raise typer.Exit() + else: + console.print("No changes, won't upload configuration.") + raise typer.Exit() + + try: + config.upload(context, filename=config.filename) + except DataSafeHavenError as exc: + logger.critical("No infrastructure found for the selected context.") + raise typer.Exit(1) from exc diff --git a/data_safe_haven/commands/context.py b/data_safe_haven/commands/context.py new file mode 100644 index 0000000000..df99d1a2de --- /dev/null +++ b/data_safe_haven/commands/context.py @@ -0,0 +1,186 @@ +"""Command group and entrypoints for managing a DSH context""" + +from typing import Annotated, Optional + +import typer + +from data_safe_haven import console, validators +from data_safe_haven.config import ContextManager +from data_safe_haven.exceptions import DataSafeHavenConfigError +from data_safe_haven.logging import get_logger + +context_command_group = typer.Typer() + + +@context_command_group.command() +def show() -> None: + """Show information about the currently selected context.""" + logger = get_logger() + try: + manager = ContextManager.from_file() + except DataSafeHavenConfigError as exc: + logger.critical( + "No context configuration file. Use `dsh context add` to create one." 
+ ) + raise typer.Exit(code=1) from exc + + current_context_name = manager.selected + current_context = manager.context + + console.print(f"Current context: [green]{current_context_name}[/]") + if current_context is not None: + console.print( + f"\tAdmin group name: [blue]{current_context.admin_group_name}[/]", + f"\tDescription: [blue]{current_context.description}[/]", + f"\tSubscription name: [blue]{current_context.subscription_name}[/]", + sep="\n", + ) + + +@context_command_group.command() +def available() -> None: + """Show the available contexts.""" + logger = get_logger() + try: + manager = ContextManager.from_file() + except DataSafeHavenConfigError as exc: + logger.critical( + "No context configuration file. Use `dsh context add` to create one." + ) + raise typer.Exit(code=1) from exc + + current_context_name = manager.selected + available = manager.available + + if current_context_name is not None: + available.remove(current_context_name) + available = [f"[green]{current_context_name}*[/]", *available] + + console.print("\n".join(available)) + + +@context_command_group.command() +def switch( + name: Annotated[str, typer.Argument(help="Name of the context to switch to.")] +) -> None: + """Switch the currently selected context.""" + logger = get_logger() + try: + manager = ContextManager.from_file() + except DataSafeHavenConfigError as exc: + logger.critical( + "No context configuration file. Use `dsh context add` to create one." + ) + raise typer.Exit(code=1) from exc + manager.selected = name + manager.write() + + +@context_command_group.command() +def add( + admin_group_name: Annotated[ + str, + typer.Option( + help="Name of a security group that contains all Azure infrastructure admins.", + callback=validators.typer_entra_group_name, + ), + ], + description: Annotated[ + str, + typer.Option( + help="The human-friendly name to give this Data Safe Haven deployment.", + ), + ], + name: Annotated[ + str, + typer.Option( + help="A name for this context which consists only of letters, numbers and underscores.", + ), + ], + subscription_name: Annotated[ + str, + typer.Option( + help="The name of an Azure subscription to deploy resources into.", + callback=validators.typer_azure_subscription_name, + ), + ], +) -> None: + """Add a new context to the context manager.""" + # Create a new context settings file if none exists + if ContextManager.default_config_file_path().exists(): + manager = ContextManager.from_file() + else: + manager = ContextManager(contexts={}, selected=None) + # Add the context to the file and write it + manager.add( + admin_group_name=admin_group_name, + description=description, + name=name, + subscription_name=subscription_name, + ) + manager.selected = name + manager.write() + + +@context_command_group.command() +def update( + admin_group_name: Annotated[ + Optional[str], # noqa: UP007 + typer.Option( + help="Name of a security group that contains all Azure infrastructure admins.", + callback=validators.typer_entra_group_name, + ), + ] = None, + description: Annotated[ + Optional[str], # noqa: UP007 + typer.Option( + help="The human friendly name to give this Data Safe Haven deployment.", + ), + ] = None, + name: Annotated[ + Optional[str], # noqa: UP007 + typer.Option( + help="A name for this context which consists only of letters, numbers and underscores.", + callback=validators.typer_safe_string, + ), + ] = None, + subscription: Annotated[ + Optional[str], # noqa: UP007 + typer.Option( + help="The name of an Azure subscription to deploy resources into.", + 
callback=validators.typer_azure_subscription_name, + ), + ] = None, +) -> None: + """Update the currently selected context.""" + logger = get_logger() + try: + manager = ContextManager.from_file() + except DataSafeHavenConfigError as exc: + logger.critical( + "No context configuration file. Use `dsh context add` to create one." + ) + raise typer.Exit(1) from exc + + manager.update( + admin_group_name=admin_group_name, + description=description, + name=name, + subscription_name=subscription, + ) + manager.write() + + +@context_command_group.command() +def remove( + name: Annotated[str, typer.Argument(help="Name of the context to remove.")], +) -> None: + """Removes a context from the the context manager.""" + logger = get_logger() + try: + manager = ContextManager.from_file() + except DataSafeHavenConfigError as exc: + logger.critical("No context configuration file.") + raise typer.Exit(1) from exc + manager.remove(name) + manager.write() diff --git a/data_safe_haven/commands/pulumi.py b/data_safe_haven/commands/pulumi.py new file mode 100644 index 0000000000..7ad9506f0b --- /dev/null +++ b/data_safe_haven/commands/pulumi.py @@ -0,0 +1,57 @@ +"""Interact with the Pulumi CLI using DSH projects""" + +from enum import UNIQUE, StrEnum, auto, verify +from typing import Annotated + +import typer + +from data_safe_haven import console +from data_safe_haven.config import ContextManager, DSHPulumiConfig, SHMConfig, SREConfig +from data_safe_haven.external import GraphApi +from data_safe_haven.infrastructure import SREProjectManager + +pulumi_command_group = typer.Typer() + + +@verify(UNIQUE) +class ProjectType(StrEnum): + SHM = auto() + SRE = auto() + + +@pulumi_command_group.command() +def run( + sre_name: Annotated[ + str, + typer.Argument(help="SRE name"), + ], + command: Annotated[ + str, + typer.Argument(help="Pulumi command to run, e.g. 
refresh"), + ], +) -> None: + """Run arbitrary Pulumi commands in a DSH project""" + context = ContextManager.from_file().assert_context() + pulumi_config = DSHPulumiConfig.from_remote(context) + shm_config = SHMConfig.from_remote(context) + sre_config = SREConfig.from_remote_by_name(context, sre_name) + + graph_api = GraphApi.from_scopes( + scopes=[ + "Application.ReadWrite.All", + "AppRoleAssignment.ReadWrite.All", + "Directory.ReadWrite.All", + "Group.ReadWrite.All", + ], + tenant_id=shm_config.shm.entra_tenant_id, + ) + + project = SREProjectManager( + context=context, + config=sre_config, + pulumi_config=pulumi_config, + graph_api_token=graph_api.token, + ) + + stdout = project.run_pulumi_command(command) + console.print(stdout) diff --git a/data_safe_haven/commands/shm.py b/data_safe_haven/commands/shm.py new file mode 100644 index 0000000000..b6694c7daa --- /dev/null +++ b/data_safe_haven/commands/shm.py @@ -0,0 +1,148 @@ +"""Command-line application for managing SHM infrastructure.""" + +from typing import Annotated, Optional + +import typer + +from data_safe_haven import console +from data_safe_haven.config import ContextManager, SHMConfig +from data_safe_haven.exceptions import ( + DataSafeHavenAzureAPIAuthenticationError, + DataSafeHavenConfigError, + DataSafeHavenError, +) +from data_safe_haven.infrastructure import ImperativeSHM +from data_safe_haven.logging import get_logger +from data_safe_haven.validators import typer_aad_guid, typer_fqdn + +shm_command_group = typer.Typer() + + +@shm_command_group.command() +def deploy( + entra_tenant_id: Annotated[ + Optional[str], # noqa: UP007 + typer.Option( + help="Tenant ID for the Entra ID used to manage TRE users.", + callback=typer_aad_guid, + ), + ] = None, + fqdn: Annotated[ + Optional[str], # noqa: UP007 + typer.Option( + help="Domain name you want your TRE to be accessible at.", + callback=typer_fqdn, + ), + ] = None, + location: Annotated[ + Optional[str], # noqa: UP007 + typer.Option( + help="The Azure location to deploy resources into.", + ), + ] = None, +) -> None: + """Deploy a Safe Haven Management environment.""" + logger = get_logger() + + # Load selected context + try: + context = ContextManager.from_file().assert_context() + except DataSafeHavenConfigError as exc: + if exc.args[0] == "No context selected": + logger.critical( + "No context selected. Use `dsh context switch` to select one." + ) + else: + logger.critical( + "No context configuration file. Use `dsh context add` before creating infrastructure." + ) + raise typer.Exit(1) from exc + + # Load SHM config from remote if it exists or locally if not + try: + if SHMConfig.remote_exists(context): + config = SHMConfig.from_remote(context) + # If command line arguments conflict with the remote version then present diff + if fqdn: + config.shm.fqdn = fqdn + if entra_tenant_id: + config.shm.entra_tenant_id = entra_tenant_id + if location: + config.azure.location = location + if diff := config.remote_yaml_diff(context): + logger = get_logger() + for line in "".join(diff).splitlines(): + logger.info(line) + if not console.confirm( + ( + "Configuration has changed, " + "do you want to overwrite the remote configuration?" + ), + default_to_yes=False, + ): + raise typer.Exit(0) + else: + if not entra_tenant_id: + logger.critical( + "You must provide the --entra-tenant-id argument when first deploying an SHM." + ) + raise typer.Exit(1) + if not fqdn: + logger.critical( + "You must provide the --fqdn argument when first deploying an SHM." 
+ ) + raise typer.Exit(1) + if not location: + logger.critical( + "You must provide the --location argument when first deploying an SHM." + ) + raise typer.Exit(1) + config = SHMConfig.from_args( + context, + entra_tenant_id=entra_tenant_id, + fqdn=fqdn, + location=location, + ) + except DataSafeHavenError as exc: + msg = "Failed to load SHM configuration." + logger.critical(msg) + raise typer.Exit(1) from exc + + # Create Data Safe Haven SHM infrastructure. + try: + shm_infra = ImperativeSHM(context, config) + shm_infra.deploy() + except DataSafeHavenAzureAPIAuthenticationError as exc: + msg = "Failed to authenticate with the Azure API. You may not be logged into the Azure CLI, or your login may have expired. Try running `az login`." + logger.critical(msg) + raise typer.Exit(1) from exc + except DataSafeHavenError as exc: + msg = "Failed to deploy Data Safe Haven infrastructure." + logger.critical(msg) + raise typer.Exit(1) from exc + # Upload config file to blob storage + config.upload(context) + + +@shm_command_group.command() +def teardown() -> None: + """Tear down a deployed a Safe Haven Management environment.""" + logger = get_logger() + try: + context = ContextManager.from_file().assert_context() + except DataSafeHavenConfigError as exc: + if exc.args[0] == "No context selected": + msg = "No context selected. Use `dsh context switch` to select one." + else: + msg = "No context configuration file. Use `dsh context add` before creating infrastructure." + logger.critical(msg) + raise typer.Exit(code=1) from exc + + # Teardown Data Safe Haven SHM infrastructure. + try: + config = SHMConfig.from_remote(context) + shm_infra = ImperativeSHM(context, config) + shm_infra.teardown() + except DataSafeHavenError as exc: + logger.critical("Could not teardown Safe Haven Management environment.") + raise typer.Exit(1) from exc diff --git a/data_safe_haven/commands/sre.py b/data_safe_haven/commands/sre.py new file mode 100644 index 0000000000..de0d96f6b2 --- /dev/null +++ b/data_safe_haven/commands/sre.py @@ -0,0 +1,182 @@ +"""Command-line application for managing SRE infrastructure.""" + +from typing import Annotated + +import typer + +from data_safe_haven.config import ContextManager, DSHPulumiConfig, SHMConfig, SREConfig +from data_safe_haven.exceptions import DataSafeHavenConfigError, DataSafeHavenError +from data_safe_haven.external import GraphApi +from data_safe_haven.functions import current_ip_address, ip_address_in_list +from data_safe_haven.infrastructure import SREProjectManager +from data_safe_haven.logging import get_logger +from data_safe_haven.provisioning import SREProvisioningManager + +sre_command_group = typer.Typer() + + +@sre_command_group.command() +def deploy( + name: Annotated[str, typer.Argument(help="Name of SRE to deploy")], + force: Annotated[ # noqa: FBT002 + bool, + typer.Option( + "--force", + "-f", + help="Force this operation, cancelling any others that are in progress.", + ), + ] = False, +) -> None: + """Deploy a Secure Research Environment""" + logger = get_logger() + try: + # Load context and SHM config + context = ContextManager.from_file().assert_context() + shm_config = SHMConfig.from_remote(context) + + # Load GraphAPI + graph_api = GraphApi.from_scopes( + scopes=[ + "Application.ReadWrite.All", + "AppRoleAssignment.ReadWrite.All", + "Directory.ReadWrite.All", + "Group.ReadWrite.All", + ], + tenant_id=shm_config.shm.entra_tenant_id, + ) + + # Load Pulumi and SRE configs + pulumi_config = DSHPulumiConfig.from_remote_or_create( + context, encrypted_key=None, 
projects={} + ) + sre_config = SREConfig.from_remote_by_name(context, name) + + # Check whether current IP address is authorised to take administrator actions + if not ip_address_in_list(sre_config.sre.admin_ip_addresses): + logger.warning( + f"IP address '{current_ip_address()}' is not authorised to deploy SRE '{sre_config.description}'." + ) + msg = "Check that 'admin_ip_addresses' is set correctly in your SRE config file." + raise DataSafeHavenConfigError(msg) + + # Initialise Pulumi stack + # Note that requesting a GraphApi token will trigger possible user-interaction + stack = SREProjectManager( + context=context, + config=sre_config, + pulumi_config=pulumi_config, + create_project=True, + graph_api_token=graph_api.token, + ) + # Set Azure options + stack.add_option( + "azure-native:location", sre_config.azure.location, replace=False + ) + stack.add_option( + "azure-native:subscriptionId", + sre_config.azure.subscription_id, + replace=False, + ) + stack.add_option( + "azure-native:tenantId", sre_config.azure.tenant_id, replace=False + ) + # Load SHM outputs + stack.add_option( + "shm-admin-group-id", + shm_config.shm.admin_group_id, + replace=True, + ) + stack.add_option( + "shm-entra-tenant-id", + shm_config.shm.entra_tenant_id, + replace=True, + ) + stack.add_option( + "shm-fqdn", + shm_config.shm.fqdn, + replace=True, + ) + + # Deploy Azure infrastructure with Pulumi + try: + stack.deploy(force=force) + finally: + # Upload Pulumi config to blob storage + pulumi_config.upload(context) + + # Provision SRE with anything that could not be done in Pulumi + manager = SREProvisioningManager( + graph_api_token=graph_api.token, + location=sre_config.azure.location, + sre_name=sre_config.name, + sre_stack=stack, + subscription_name=context.subscription_name, + timezone=sre_config.sre.timezone, + ) + manager.run() + except DataSafeHavenError as exc: + logger.critical( + f"Could not deploy Secure Research Environment '[green]{name}[/]'." + ) + raise typer.Exit(code=1) from exc + + +@sre_command_group.command() +def teardown( + name: Annotated[str, typer.Argument(help="Name of SRE to teardown.")], + force: Annotated[ # noqa: FBT002 + bool, + typer.Option( + "--force", + "-f", + help="Force this operation, cancelling any others that are in progress.", + ), + ] = False, +) -> None: + """Tear down a deployed a Secure Research Environment.""" + logger = get_logger() + try: + # Load context and SHM config + context = ContextManager.from_file().assert_context() + shm_config = SHMConfig.from_remote(context) + + # Load GraphAPI as this may require user-interaction + graph_api = GraphApi.from_scopes( + scopes=["Application.ReadWrite.All", "Group.ReadWrite.All"], + tenant_id=shm_config.shm.entra_tenant_id, + ) + + # Load Pulumi and SRE configs + pulumi_config = DSHPulumiConfig.from_remote(context) + sre_config = SREConfig.from_remote_by_name(context, name) + + # Check whether current IP address is authorised to take administrator actions + if not ip_address_in_list(sre_config.sre.admin_ip_addresses): + logger.warning( + f"IP address '{current_ip_address()}' is not authorised to teardown SRE '{sre_config.description}'." + ) + msg = "Check that 'admin_ip_addresses' is set correctly in your SRE config file." + raise DataSafeHavenConfigError(msg) + + # Remove infrastructure deployed with Pulumi + # N.B. 
We allow the creation of a project (which is immediately removed) + # to stop Pulumi operations from crashing due to a missing stack + stack = SREProjectManager( + context=context, + config=sre_config, + pulumi_config=pulumi_config, + graph_api_token=graph_api.token, + create_project=True, + ) + stack.teardown(force=force) + + # Remove Pulumi project from Pulumi config file + del pulumi_config[name] + + # Upload Pulumi config to blob storage + pulumi_config.upload(context) + except DataSafeHavenError as exc: + logger.critical( + f"Could not teardown Secure Research Environment '[green]{name}[/]'." + ) + raise typer.Exit(1) from exc diff --git a/data_safe_haven/commands/users.py b/data_safe_haven/commands/users.py new file mode 100644 index 0000000000..e250bc9fd5 --- /dev/null +++ b/data_safe_haven/commands/users.py @@ -0,0 +1,268 @@ +"""Command-line application for performing user management tasks.""" + +import pathlib +from typing import Annotated + +import typer + +from data_safe_haven.administration.users import UserHandler +from data_safe_haven.config import ContextManager, DSHPulumiConfig, SHMConfig, SREConfig +from data_safe_haven.exceptions import DataSafeHavenError +from data_safe_haven.external import GraphApi +from data_safe_haven.logging import get_logger + +users_command_group = typer.Typer() + + +@users_command_group.command() +def add( + csv: Annotated[ + pathlib.Path, + typer.Argument( + help="A CSV file containing details of users to add.", + ), + ], +) -> None: + """Add users to a deployed Data Safe Haven.""" + logger = get_logger() + try: + context = ContextManager.from_file().assert_context() + + # Load SHMConfig + try: + shm_config = SHMConfig.from_remote(context) + except DataSafeHavenError: + logger.error("Have you deployed the SHM?") + raise + + # Load GraphAPI + graph_api = GraphApi.from_scopes( + scopes=[ + "Group.Read.All", + "User.ReadWrite.All", + "UserAuthenticationMethod.ReadWrite.All", + ], + tenant_id=shm_config.shm.entra_tenant_id, + ) + + # Add users to SHM + users = UserHandler(context, graph_api) + users.add(csv, shm_config.shm.fqdn) + except DataSafeHavenError as exc: + logger.critical("Could not add users to Data Safe Haven.") + raise typer.Exit(1) from exc + + +@users_command_group.command("list") +def list_users( + sre: Annotated[ + str, + typer.Argument( + help="The name of the SRE to list users from.", + ), + ], +) -> None: + """List users from a deployed Data Safe Haven.""" + logger = get_logger() + try: + context = ContextManager.from_file().assert_context() + + # Load SHMConfig + try: + shm_config = SHMConfig.from_remote(context) + except DataSafeHavenError: + logger.error("Have you deployed the SHM?") + raise + + # Load GraphAPI + graph_api = GraphApi.from_scopes( + scopes=["Directory.Read.All", "Group.Read.All"], + tenant_id=shm_config.shm.entra_tenant_id, + ) + + # Load Pulumi config + pulumi_config = DSHPulumiConfig.from_remote(context) + + # List users from all sources + users = UserHandler(context, graph_api) + users.list(sre, pulumi_config) + except DataSafeHavenError as exc: + logger.critical("Could not list Data Safe Haven users.") + raise typer.Exit(1) from exc + + +@users_command_group.command() +def register( + usernames: Annotated[ + list[str], + typer.Option( + "--username", + "-u", + help="Username of a user to register with this SRE. 
[*may be specified several times*]", + ), + ], + sre: Annotated[ + str, + typer.Argument( + help="The name of the SRE to add the users to.", + ), + ], +) -> None: + """Register existing users with a deployed SRE.""" + logger = get_logger() + try: + context = ContextManager.from_file().assert_context() + + # Load SHMConfig + try: + shm_config = SHMConfig.from_remote(context) + except DataSafeHavenError: + logger.error("Have you deployed the SHM?") + raise + + # Load Pulumi config + pulumi_config = DSHPulumiConfig.from_remote(context) + + # Load SREConfig + sre_config = SREConfig.from_remote_by_name(context, sre) + if sre_config.name not in pulumi_config.project_names: + msg = f"Could not load Pulumi settings for '{sre_config.name}'. Have you deployed the SRE?" + logger.error(msg) + raise DataSafeHavenError(msg) + + # Load GraphAPI + graph_api = GraphApi.from_scopes( + scopes=["Group.ReadWrite.All", "GroupMember.ReadWrite.All"], + tenant_id=shm_config.shm.entra_tenant_id, + ) + + logger.debug( + f"Preparing to register {len(usernames)} user(s) with SRE '{sre_config.name}'" + ) + + # List users + users = UserHandler(context, graph_api) + available_usernames = users.get_usernames_entra_id() + usernames_to_register = [] + for username in usernames: + if username in available_usernames: + usernames_to_register.append(username) + else: + logger.error( + f"Username '{username}' does not belong to this Data Safe Haven deployment." + " Please use 'dsh users add' to create it." + ) + users.register(sre_config.name, usernames_to_register) + except DataSafeHavenError as exc: + logger.critical(f"Could not register Data Safe Haven users with SRE '{sre}'.") + raise typer.Exit(1) from exc + + +@users_command_group.command() +def remove( + usernames: Annotated[ + list[str], + typer.Option( + "--username", + "-u", + help="Username of a user to remove from this Data Safe Haven. [*may be specified several times*]", + ), + ], +) -> None: + """Remove existing users from a deployed Data Safe Haven.""" + logger = get_logger() + try: + context = ContextManager.from_file().assert_context() + + # Load SHMConfig + try: + shm_config = SHMConfig.from_remote(context) + except DataSafeHavenError: + logger.error("Have you deployed the SHM?") + raise + + # Load GraphAPI + graph_api = GraphApi.from_scopes( + scopes=["User.ReadWrite.All"], + tenant_id=shm_config.shm.entra_tenant_id, + ) + + # Remove users from SHM + if usernames: + users = UserHandler(context, graph_api) + users.remove(usernames) + except DataSafeHavenError as exc: + logger.critical("Could not remove users from Data Safe Haven.") + raise typer.Exit(1) from exc + + +@users_command_group.command() +def unregister( + usernames: Annotated[ + list[str], + typer.Option( + "--username", + "-u", + help="Username of a user to unregister from this SRE. 
[*may be specified several times*]", + ), + ], + sre: Annotated[ + str, + typer.Argument( + help="The name of the SRE to unregister the users from.", + ), + ], +) -> None: + """Unregister existing users from a deployed SRE.""" + logger = get_logger() + try: + context = ContextManager.from_file().assert_context() + + # Load SHMConfig + try: + shm_config = SHMConfig.from_remote(context) + except DataSafeHavenError: + logger.error("Have you deployed the SHM?") + raise + + # Load Pulumi config + pulumi_config = DSHPulumiConfig.from_remote(context) + + # Load SREConfig + sre_config = SREConfig.from_remote_by_name(context, sre) + if sre_config.name not in pulumi_config.project_names: + msg = f"Could not load Pulumi settings for '{sre_config.name}'. Have you deployed the SRE?" + logger.error(msg) + raise DataSafeHavenError(msg) + + # Load GraphAPI + graph_api = GraphApi.from_scopes( + scopes=["Group.ReadWrite.All", "GroupMember.ReadWrite.All"], + tenant_id=shm_config.shm.entra_tenant_id, + ) + + logger.debug( + f"Preparing to unregister {len(usernames)} users with SRE '{sre_config.name}'" + ) + + # List users + users = UserHandler(context, graph_api) + available_usernames = users.get_usernames_entra_id() + usernames_to_unregister = [] + for username in usernames: + if username in available_usernames: + usernames_to_unregister.append(username) + else: + logger.error( + f"Username '{username}' does not belong to this Data Safe Haven deployment." + " Please use 'dsh users add' to create it." + ) + for group_name in ( + f"{sre_config.name} Users", + f"{sre_config.name} Privileged Users", + f"{sre_config.name} Administrators", + ): + users.unregister(group_name, usernames_to_unregister) + except DataSafeHavenError as exc: + logger.critical(f"Could not unregister Data Safe Haven users from SRE '{sre}'.") + raise typer.Exit(1) from exc diff --git a/data_safe_haven/config/__init__.py b/data_safe_haven/config/__init__.py new file mode 100644 index 0000000000..b78acd6ab1 --- /dev/null +++ b/data_safe_haven/config/__init__.py @@ -0,0 +1,15 @@ +from .context import Context +from .context_manager import ContextManager +from .dsh_pulumi_config import DSHPulumiConfig +from .dsh_pulumi_project import DSHPulumiProject +from .shm_config import SHMConfig +from .sre_config import SREConfig + +__all__ = [ + "Context", + "ContextManager", + "DSHPulumiConfig", + "DSHPulumiProject", + "SHMConfig", + "SREConfig", +] diff --git a/data_safe_haven/config/config_sections.py b/data_safe_haven/config/config_sections.py new file mode 100644 index 0000000000..c1385c3328 --- /dev/null +++ b/data_safe_haven/config/config_sections.py @@ -0,0 +1,61 @@ +"""Sections for use in configuration files""" + +from __future__ import annotations + +from pydantic import BaseModel, Field + +from data_safe_haven.types import ( + AzureLocation, + AzureVmSku, + DatabaseSystem, + EmailAddress, + Fqdn, + Guid, + IpAddress, + SafeString, + SoftwarePackageCategory, + TimeZone, + UniqueList, +) + + +class ConfigSectionAzure(BaseModel, validate_assignment=True): + location: AzureLocation + subscription_id: Guid + tenant_id: Guid + + +class ConfigSectionDockerHub(BaseModel, validate_assignment=True): + access_token: SafeString + username: SafeString + + +class ConfigSectionSHM(BaseModel, validate_assignment=True): + admin_group_id: Guid + entra_tenant_id: Guid + fqdn: Fqdn + + +class ConfigSubsectionRemoteDesktopOpts(BaseModel, validate_assignment=True): + allow_copy: bool = False + allow_paste: bool = False + + +class ConfigSectionSRE(BaseModel, 
validate_assignment=True): + admin_email_address: EmailAddress + admin_ip_addresses: list[IpAddress] = Field(..., default_factory=list[IpAddress]) + databases: UniqueList[DatabaseSystem] = Field( + ..., default_factory=list[DatabaseSystem] + ) + data_provider_ip_addresses: list[IpAddress] = Field( + ..., default_factory=list[IpAddress] + ) + remote_desktop: ConfigSubsectionRemoteDesktopOpts = Field( + ..., default_factory=ConfigSubsectionRemoteDesktopOpts + ) + research_user_ip_addresses: list[IpAddress] = Field( + ..., default_factory=list[IpAddress] + ) + software_packages: SoftwarePackageCategory = SoftwarePackageCategory.NONE + timezone: TimeZone = "Etc/UTC" + workspace_skus: list[AzureVmSku] = Field(..., default_factory=list[AzureVmSku]) diff --git a/data_safe_haven/config/context.py b/data_safe_haven/config/context.py new file mode 100644 index 0000000000..426795bf93 --- /dev/null +++ b/data_safe_haven/config/context.py @@ -0,0 +1,87 @@ +from __future__ import annotations + +from pathlib import Path +from typing import ClassVar + +import yaml +from azure.keyvault.keys import KeyVaultKey +from pydantic import BaseModel + +from data_safe_haven import __version__ +from data_safe_haven.directories import config_dir +from data_safe_haven.external import AzureSdk +from data_safe_haven.functions import alphanumeric +from data_safe_haven.serialisers import ContextBase +from data_safe_haven.types import AzureSubscriptionName, EntraGroupName, SafeString + + +class Context(ContextBase, BaseModel, validate_assignment=True): + admin_group_name: EntraGroupName + description: str + name: SafeString + subscription_name: AzureSubscriptionName + storage_container_name: ClassVar[str] = "config" + pulumi_storage_container_name: ClassVar[str] = "pulumi" + pulumi_encryption_key_name: ClassVar[str] = "pulumi-encryption-key" + + _pulumi_encryption_key = None + + @property + def tags(self) -> dict[str, str]: + return { + "description": self.description, + "project": "Data Safe Haven", + "shm_name": self.name, + "version": __version__, + } + + @property + def work_directory(self) -> Path: + return config_dir() / self.name + + @property + def resource_group_name(self) -> str: + return f"shm-{self.name}-rg" + + @property + def storage_account_name(self) -> str: + # https://learn.microsoft.com/en-us/azure/storage/common/storage-account-overview#storage-account-name + # Storage account names must be between 3 and 24 characters in length and may + # contain numbers and lowercase letters only. 
+ return f"shm{alphanumeric(self.name)[:21]}" + + @property + def key_vault_name(self) -> str: + # maximum of 24 characters allowed + return f"shm-{self.name[:17]}-kv" + + @property + def managed_identity_name(self) -> str: + return f"shm-{self.name}-identity-reader" + + @property + def pulumi_backend_url(self) -> str: + return f"azblob://{self.pulumi_storage_container_name}" + + @property + def pulumi_encryption_key(self) -> KeyVaultKey: + if not self._pulumi_encryption_key: + azure_sdk = AzureSdk(subscription_name=self.subscription_name) + self._pulumi_encryption_key = azure_sdk.get_keyvault_key( + key_name=self.pulumi_encryption_key_name, + key_vault_name=self.key_vault_name, + ) + return self._pulumi_encryption_key + + @property + def pulumi_encryption_key_version(self) -> str: + """ID for the Pulumi encryption key""" + key_id: str = self.pulumi_encryption_key.id + return key_id.split("/")[-1] + + @property + def pulumi_secrets_provider_url(self) -> str: + return f"azurekeyvault://{self.key_vault_name}.vault.azure.net/keys/{self.pulumi_encryption_key_name}/{self.pulumi_encryption_key_version}" + + def to_yaml(self) -> str: + return yaml.dump(self.model_dump(), indent=2) diff --git a/data_safe_haven/config/context_manager.py b/data_safe_haven/config/context_manager.py new file mode 100644 index 0000000000..90ed17b77c --- /dev/null +++ b/data_safe_haven/config/context_manager.py @@ -0,0 +1,171 @@ +"""Load global and local settings from dotfiles""" + +# For postponed evaluation of annotations https://peps.python.org/pep-0563 +from __future__ import annotations + +from logging import Logger +from pathlib import Path +from typing import ClassVar + +from pydantic import Field, model_validator + +from data_safe_haven.directories import config_dir +from data_safe_haven.exceptions import DataSafeHavenConfigError, DataSafeHavenValueError +from data_safe_haven.logging import get_logger +from data_safe_haven.serialisers import YAMLSerialisableModel + +from .context import Context + + +class ContextManager(YAMLSerialisableModel): + """Load available and current contexts from YAML files structured as follows: + + selected: acmedeployment + contexts: + acmedeployment: + admin_group_name: Acme Admins + description: Acme Deployment + name: acmedeployment + subscription_name: Data Safe Haven (Acme) + acmetesting: + admin_group_name: Acme Testing Admins + description: Acme Testing + name: acmetesting + subscription_name: Data Safe Haven (Acme Testing) + ... + """ + + config_type: ClassVar[str] = "ContextManager" + selected_: str | None = Field(..., alias="selected") + contexts: dict[str, Context] + logger: ClassVar[Logger] = get_logger() + + @model_validator(mode="after") + def ensure_selected_is_valid(self) -> ContextManager: + if self.selected is not None: + if self.selected not in self.available: + msg = f"Selected context '{self.selected}' is not defined." + raise ValueError(msg) + return self + + @staticmethod + def default_config_file_path() -> Path: + return config_dir() / "contexts.yaml" + + @property + def selected(self) -> str | None: + return self.selected_ + + @selected.setter + def selected(self, name: str | None) -> None: + if name in self.available or name is None: + self.selected_ = name + self.logger.info(f"Switched context to '{name}'.") + else: + msg = f"Context '{name}' is not defined." 
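+ # Only contexts previously added to the manager can be selected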
+ raise DataSafeHavenValueError(msg) + + @property + def context(self) -> Context | None: + if self.selected is None: + return None + else: + return self.contexts[self.selected] + + @property + def available(self) -> list[str]: + return list(self.contexts.keys()) + + def assert_context(self) -> Context: + if context := self.context: + return context + else: + msg = "No context selected." + raise DataSafeHavenConfigError(msg) + + def update( + self, + *, + admin_group_name: str | None = None, + description: str | None = None, + name: str | None = None, + subscription_name: str | None = None, + ) -> None: + context = self.assert_context() + + if admin_group_name: + self.logger.debug( + f"Updating admin group name from '{context.admin_group_name}' to '[green]{admin_group_name}[/]'." + ) + context.admin_group_name = admin_group_name + if description: + self.logger.debug( + f"Updating description from '{context.description}' to '[green]{description}[/]'." + ) + context.description = description + if name: + self.logger.debug( + f"Updating name from '{context.name}' to '[green]{name}[/]'." + ) + context.name = name + if subscription_name: + self.logger.debug( + f"Updating subscription name from '{context.subscription_name}' to '[green]{subscription_name}[/]'." + ) + context.subscription_name = subscription_name + + # If the name has changed we also need to change the key + if name: + self.contexts[name] = context + if self.selected: + del self.contexts[self.selected] + self.selected = name + + def add( + self, + *, + admin_group_name: str, + description: str, + name: str, + subscription_name: str, + ) -> None: + # Ensure context is not already present + if name in self.available: + msg = f"A context with name '{name}' is already defined." + raise DataSafeHavenValueError(msg) + + self.logger.info(f"Creating a new context with name '{name}'.") + self.contexts[name] = Context( + admin_group_name=admin_group_name, + description=description, + name=name, + subscription_name=subscription_name, + ) + if not self.selected: + self.selected = name + + def remove(self, name: str) -> None: + if name not in self.available: + msg = f"No context with name '{name}'." + raise DataSafeHavenValueError(msg) + del self.contexts[name] + + # Prevent having a deleted context selected + if name == self.selected: + self.selected = None + + @classmethod + def from_file(cls, config_file_path: Path | None = None) -> ContextManager: + if config_file_path is None: + config_file_path = cls.default_config_file_path() + cls.logger.debug( + f"Reading project settings from '[green]{config_file_path}[/]'." 
+ ) + return cls.from_filepath(config_file_path) + + def write(self, config_file_path: Path | None = None) -> None: + """Write settings to YAML file""" + if config_file_path is None: + config_file_path = self.default_config_file_path() + self.to_filepath(config_file_path) + self.logger.debug(f"Saved context settings to '[green]{config_file_path}[/]'.") diff --git a/data_safe_haven/config/dsh_pulumi_config.py b/data_safe_haven/config/dsh_pulumi_config.py new file mode 100644 index 0000000000..63581d31dd --- /dev/null +++ b/data_safe_haven/config/dsh_pulumi_config.py @@ -0,0 +1,63 @@ +from __future__ import annotations + +from typing import ClassVar + +from data_safe_haven.serialisers import AzureSerialisableModel + +from .dsh_pulumi_project import DSHPulumiProject + + +class DSHPulumiConfig(AzureSerialisableModel): + """Serialisable container for multiple DSH Pulumi projects.""" + + config_type: ClassVar[str] = "Pulumi" + default_filename: ClassVar[str] = "pulumi.yaml" + encrypted_key: str | None + projects: dict[str, DSHPulumiProject] + + def __getitem__(self, key: str) -> DSHPulumiProject: + if not isinstance(key, str): + msg = "'key' must be a string." + raise TypeError(msg) + + if key not in self.projects.keys(): + msg = f"No configuration for DSH Pulumi Project {key}." + raise KeyError(msg) + + return self.projects[key] + + def __setitem__(self, key: str, value: DSHPulumiProject) -> None: + """ + Add a DSH Pulumi Project. + This method does not support modifying existing projects. + """ + if not isinstance(key, str): + msg = "'key' must be a string." + raise TypeError(msg) + + if key in self.project_names: + msg = f"Stack {key} already exists." + raise ValueError(msg) + + self.projects[key] = value + + def __delitem__(self, key: str) -> None: + if not isinstance(key, str): + msg = "'key' must be a string." + raise TypeError(msg) + + if key not in self.projects.keys(): + msg = f"No configuration for DSH Pulumi Project {key}." 
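+ # Follow dict semantics by raising KeyError for unknown project names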
+ raise KeyError(msg) + + del self.projects[key] + + @property + def project_names(self) -> list[str]: + """Produce a list of known DSH Pulumi Project names""" + return list(self.projects.keys()) + + def create_or_select_project(self, project_name: str) -> DSHPulumiProject: + if project_name not in self.project_names: + self[project_name] = DSHPulumiProject(stack_config={}) + return self[project_name] diff --git a/data_safe_haven/config/dsh_pulumi_project.py b/data_safe_haven/config/dsh_pulumi_project.py new file mode 100644 index 0000000000..cfc77e7d8f --- /dev/null +++ b/data_safe_haven/config/dsh_pulumi_project.py @@ -0,0 +1,19 @@ +from __future__ import annotations + +from typing import Any + +from pydantic import BaseModel + + +class DSHPulumiProject(BaseModel, validate_assignment=True): + """Container for DSH Pulumi Project persistent information""" + + stack_config: dict[str, Any] + + def __eq__(self, other: object) -> bool: + if not isinstance(other, DSHPulumiProject): + return NotImplemented + return self.stack_config == other.stack_config + + def __hash__(self) -> int: + return hash(self.stack_config) diff --git a/data_safe_haven/config/shm_config.py b/data_safe_haven/config/shm_config.py new file mode 100644 index 0000000000..1fa3410f36 --- /dev/null +++ b/data_safe_haven/config/shm_config.py @@ -0,0 +1,60 @@ +"""SHM configuration file backed by blob storage""" + +from __future__ import annotations + +from typing import ClassVar, Self + +from data_safe_haven.external import AzureSdk +from data_safe_haven.serialisers import AzureSerialisableModel, ContextBase + +from .config_sections import ConfigSectionAzure, ConfigSectionSHM + + +class SHMConfig(AzureSerialisableModel): + config_type: ClassVar[str] = "SHMConfig" + default_filename: ClassVar[str] = "shm.yaml" + azure: ConfigSectionAzure + shm: ConfigSectionSHM + + @classmethod + def from_args( + cls: type[Self], + context: ContextBase, + *, + entra_tenant_id: str, + fqdn: str, + location: str, + ) -> SHMConfig: + """Construct an SHMConfig from arguments.""" + azure_sdk = AzureSdk(subscription_name=context.subscription_name) + admin_group_id = azure_sdk.entra_directory.get_id_from_groupname( + context.admin_group_name + ) + return SHMConfig.model_construct( + azure=ConfigSectionAzure.model_construct( + location=location, + subscription_id=azure_sdk.subscription_id, + tenant_id=azure_sdk.tenant_id, + ), + shm=ConfigSectionSHM.model_construct( + admin_group_id=admin_group_id, + entra_tenant_id=entra_tenant_id, + fqdn=fqdn, + ), + ) + + @classmethod + def template(cls: type[Self]) -> SHMConfig: + """Create SHMConfig without validation to allow "replace me" prompts.""" + return SHMConfig.model_construct( + azure=ConfigSectionAzure.model_construct( + location="Azure location where SHM resources will be deployed.", + subscription_id="ID of the Azure subscription that the SHM will be deployed to", + tenant_id="Home tenant for the Azure account used to deploy infrastructure: `az account show`", + ), + shm=ConfigSectionSHM.model_construct( + admin_group_id="ID of a security group that contains all Azure infrastructure admins.", + entra_tenant_id="Tenant ID for the Entra ID used to manage TRE users", + fqdn="Domain you want your users to belong to and where your TRE will be deployed", + ), + ) diff --git a/data_safe_haven/config/sre_config.py b/data_safe_haven/config/sre_config.py new file mode 100644 index 0000000000..2093190c1c --- /dev/null +++ b/data_safe_haven/config/sre_config.py @@ -0,0 +1,78 @@ +"""SRE configuration file backed by 
blob storage""" + +from __future__ import annotations + +from typing import ClassVar, Self + +from data_safe_haven.functions import json_safe +from data_safe_haven.serialisers import AzureSerialisableModel, ContextBase +from data_safe_haven.types import SafeString + +from .config_sections import ( + ConfigSectionAzure, + ConfigSectionDockerHub, + ConfigSectionSRE, + ConfigSubsectionRemoteDesktopOpts, +) + + +def sre_config_name(sre_name: str) -> str: + """Construct a safe YAML filename given an input SRE name.""" + return f"sre-{json_safe(sre_name)}.yaml" + + +class SREConfig(AzureSerialisableModel): + config_type: ClassVar[str] = "SREConfig" + default_filename: ClassVar[str] = "sre.yaml" + azure: ConfigSectionAzure + description: str + dockerhub: ConfigSectionDockerHub + name: SafeString + sre: ConfigSectionSRE + + @property + def filename(self) -> str: + """Construct a canonical filename for this SREConfig.""" + return sre_config_name(self.name) + + @classmethod + def from_remote_by_name( + cls: type[Self], context: ContextBase, sre_name: str + ) -> SREConfig: + """Load an SREConfig from Azure storage.""" + return cls.from_remote(context, filename=sre_config_name(sre_name)) + + @classmethod + def template(cls: type[Self]) -> SREConfig: + """Create SREConfig without validation to allow "replace me" prompts.""" + return SREConfig.model_construct( + azure=ConfigSectionAzure.model_construct( + location="Azure location where SRE resources will be deployed.", + subscription_id="ID of the Azure subscription that the SRE will be deployed to", + tenant_id="Home tenant for the Azure account used to deploy infrastructure: `az account show`", + ), + dockerhub=ConfigSectionDockerHub.model_construct( + access_token="A DockerHub personal access token (PAT) with 'Public Read-Only' permissions. See instructions here: https://docs.docker.com/security/for-developers/access-tokens/", + username="Your DockerHub username.", + ), + description="Human-friendly name for this SRE deployment.", + name="A name for this config which consists only of letters, numbers and underscores.", + sre=ConfigSectionSRE.model_construct( + admin_email_address="Email address shared by all administrators", + admin_ip_addresses=["List of IP addresses belonging to administrators"], + databases=["List of database systems to deploy"], + data_provider_ip_addresses=[ + "List of IP addresses belonging to data providers" + ], + remote_desktop=ConfigSubsectionRemoteDesktopOpts.model_construct( + allow_copy="True/False: whether to allow copying text out of the environment", + allow_paste="True/False: whether to allow pasting text into the environment", + ), + research_user_ip_addresses=["List of IP addresses belonging to users"], + software_packages="any/pre-approved/none: which packages from external repositories to allow", + timezone="Timezone in pytz format (eg. 
Europe/London)", + workspace_skus=[ + "List of Azure VM SKUs - see cloudprice.net for list of valid SKUs" + ], + ), + ) diff --git a/data_safe_haven/console/__init__.py b/data_safe_haven/console/__init__.py new file mode 100644 index 0000000000..133a48fc12 --- /dev/null +++ b/data_safe_haven/console/__init__.py @@ -0,0 +1,9 @@ +from .format import tabulate +from .pretty import pretty_print as print # noqa: A001 +from .prompts import confirm + +__all__ = [ + "confirm", + "print", + "tabulate", +] diff --git a/data_safe_haven/console/format.py b/data_safe_haven/console/format.py new file mode 100644 index 0000000000..dcd19753c8 --- /dev/null +++ b/data_safe_haven/console/format.py @@ -0,0 +1,26 @@ +from rich.table import Table + +from .pretty import pretty_print + + +def tabulate( + header: list[str] | None = None, rows: list[list[str]] | None = None +) -> None: + """Generate a table from header and rows + + Args: + header: The table header + rows: The table rows + + Returns: + A list of strings representing the table + """ + table = Table() + if header: + for item in header: + table.add_column(item) + if rows: + for row in rows: + table.add_row(*row) + + pretty_print(table) diff --git a/data_safe_haven/console/pretty.py b/data_safe_haven/console/pretty.py new file mode 100644 index 0000000000..6a5ca46eab --- /dev/null +++ b/data_safe_haven/console/pretty.py @@ -0,0 +1,15 @@ +from typing import Any + +from rich.console import Console + +console = Console() + + +def pretty_print( + *objects: Any, + sep: str = " ", +) -> None: + console.print( + *objects, + sep=sep, + ) diff --git a/data_safe_haven/console/prompts.py b/data_safe_haven/console/prompts.py new file mode 100644 index 0000000000..201d4fcc28 --- /dev/null +++ b/data_safe_haven/console/prompts.py @@ -0,0 +1,14 @@ +from rich.prompt import Confirm + +from data_safe_haven.logging import get_logger + + +def confirm(message: str, *, default_to_yes: bool) -> bool: + """Ask a user to confirm an action, formatted as a log message""" + logger = get_logger() + + logger.debug(f"Prompting user to confirm '{message}'") + response: bool = Confirm.ask(message, default=default_to_yes) + response_text = "yes" if response else "no" + logger.debug(f"User responded '{response_text}'") + return response diff --git a/data_safe_haven/directories.py b/data_safe_haven/directories.py new file mode 100644 index 0000000000..6e520bcb37 --- /dev/null +++ b/data_safe_haven/directories.py @@ -0,0 +1,25 @@ +from os import getenv +from pathlib import Path + +import appdirs + +_appname = "data_safe_haven" + + +def config_dir() -> Path: + if config_directory_env := getenv("DSH_CONFIG_DIRECTORY"): + config_directory = Path(config_directory_env).resolve() + else: + config_directory = Path(appdirs.user_config_dir(appname=_appname)).resolve() + + return config_directory + + +def log_dir() -> Path: + if log_directory_env := getenv("DSH_LOG_DIRECTORY"): + log_directory = Path(log_directory_env).resolve() + else: + log_directory = Path(appdirs.user_log_dir(appname=_appname)).resolve() + log_directory.mkdir(parents=True, exist_ok=True) + + return log_directory diff --git a/data_safe_haven/exceptions/__init__.py b/data_safe_haven/exceptions/__init__.py new file mode 100644 index 0000000000..b22d70e693 --- /dev/null +++ b/data_safe_haven/exceptions/__init__.py @@ -0,0 +1,132 @@ +from data_safe_haven.logging import get_logger + + +class DataSafeHavenError(Exception): + """ + Parent class for all DataSafeHaven exceptions. + + This class is not intended to be instantiated directly. 
Developers should use one of the subclasses instead. + """ + + def __init__(self, message: str | bytes): + super().__init__(message) + + # Log exception message as an error + logger = get_logger() + message_str = message if isinstance(message, str) else message.decode("utf-8") + # Replace line breaks with escape code + logger.error(message_str.replace("\n", r"\n")) + + +class DataSafeHavenAzureError(DataSafeHavenError): + """ + Exception class for handling errors when interacting with Azure. + + Raise this error when, for example, creating resources in Azure fails. + """ + + pass + + +class DataSafeHavenAzureStorageError(DataSafeHavenAzureError): + """ + Exception class for handling errors when interacting with Azure Storage. + + Raise this error when a storage account does not exist. + """ + + pass + + +class DataSafeHavenAzureAPIAuthenticationError(DataSafeHavenError): + """ + Exception class for handling errors when authenticating against the Azure API. + + Used to capture exceptions generated when the user is not authenticated or authentication has expired. + """ + + pass + + +class DataSafeHavenConfigError(DataSafeHavenError): + """ + Exception class for handling errors related to configuration files. + + Examples include missing configuration files or invalid configuration values. + """ + + pass + + +class DataSafeHavenEntraIDError(DataSafeHavenError): + """ + Exception class for handling errors when interacting with Entra ID. + + For example, when adding users to an Entra group fails. + """ + + pass + + +class DataSafeHavenIPRangeError(DataSafeHavenError): + """Exception raised when it is not possible to generate a valid IPv4 range.""" + + pass + + +class DataSafeHavenMicrosoftGraphError(DataSafeHavenAzureError): + """ + Exception class for handling errors when interacting with the Microsoft Graph API. + """ + + pass + + +class DataSafeHavenPulumiError(DataSafeHavenError): + """ + Exception class for handling errors when interacting with Pulumi. + + For example, when a Pulumi operation such as a deployment fails. + """ + + pass + + +class DataSafeHavenSSLError(DataSafeHavenError): + """ + Exception class for handling errors related to administration of SSL certificates. + + For example, errors refreshing or creating SSL certificates. + """ + + pass + + +class DataSafeHavenTypeError(DataSafeHavenError): + """ + Exception class for handling errors related to type checking. + + For example, when a function is called with an argument of the wrong type. + """ + + pass + + +class DataSafeHavenUserHandlingError(DataSafeHavenError): + """ + Exception class for handling errors related to user handling. + + For example, when listing or registering users fails. + """ + + pass + + +class DataSafeHavenValueError(DataSafeHavenError): + """ + Exception class for handling errors related to value checking. + + For example, when a function is called with an argument of the wrong value. 
+ """ + + pass diff --git a/data_safe_haven/external/__init__.py b/data_safe_haven/external/__init__.py new file mode 100644 index 0000000000..5e46325958 --- /dev/null +++ b/data_safe_haven/external/__init__.py @@ -0,0 +1,15 @@ +from .api.azure_sdk import AzureSdk +from .api.graph_api import GraphApi +from .interface.azure_container_instance import AzureContainerInstance +from .interface.azure_ipv4_range import AzureIPv4Range +from .interface.azure_postgresql_database import AzurePostgreSQLDatabase +from .interface.pulumi_account import PulumiAccount + +__all__ = [ + "AzureSdk", + "AzureContainerInstance", + "AzureIPv4Range", + "AzurePostgreSQLDatabase", + "GraphApi", + "PulumiAccount", +] diff --git a/environment_configs/package_lists/allowlist-extra-r-cran-tier3.list b/data_safe_haven/external/api/__init__.py similarity index 100% rename from environment_configs/package_lists/allowlist-extra-r-cran-tier3.list rename to data_safe_haven/external/api/__init__.py diff --git a/data_safe_haven/external/api/azure_sdk.py b/data_safe_haven/external/api/azure_sdk.py new file mode 100644 index 0000000000..118536c833 --- /dev/null +++ b/data_safe_haven/external/api/azure_sdk.py @@ -0,0 +1,1288 @@ +"""Interface to the Azure Python SDK""" + +import time +from contextlib import suppress +from typing import Any, cast + +from azure.core.exceptions import ( + AzureError, + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ServiceRequestError, +) +from azure.keyvault.certificates import CertificateClient, KeyVaultCertificate +from azure.keyvault.keys import KeyClient, KeyVaultKey +from azure.keyvault.secrets import SecretClient +from azure.mgmt.compute.v2021_07_01 import ComputeManagementClient +from azure.mgmt.compute.v2021_07_01.models import ( + ResourceSkuCapabilities, + RunCommandInput, + RunCommandInputParameter, + RunCommandResult, +) +from azure.mgmt.dns.v2018_05_01 import DnsManagementClient +from azure.mgmt.dns.v2018_05_01.models import ( + CaaRecord, + RecordSet, + RecordType, + TxtRecord, + Zone, + ZoneType, +) +from azure.mgmt.keyvault.v2021_06_01_preview import KeyVaultManagementClient +from azure.mgmt.keyvault.v2021_06_01_preview.models import ( + AccessPolicyEntry, + Permissions, + Sku as KeyVaultSku, + Vault, + VaultCreateOrUpdateParameters, + VaultProperties, +) +from azure.mgmt.msi.v2022_01_31_preview import ManagedServiceIdentityClient +from azure.mgmt.msi.v2022_01_31_preview.models import Identity +from azure.mgmt.resource.resources.v2021_04_01 import ResourceManagementClient +from azure.mgmt.resource.resources.v2021_04_01.models import ResourceGroup +from azure.mgmt.resource.subscriptions import SubscriptionClient +from azure.mgmt.resource.subscriptions.models import Location, Subscription +from azure.mgmt.storage.v2021_08_01 import StorageManagementClient +from azure.mgmt.storage.v2021_08_01.models import ( + BlobContainer, + Kind as StorageAccountKind, + PublicAccess, + Sku as StorageAccountSku, + StorageAccount, + StorageAccountCreateParameters, + StorageAccountKey, + StorageAccountListKeysResult, +) +from azure.storage.blob import BlobClient, BlobServiceClient +from azure.storage.filedatalake import DataLakeServiceClient + +from data_safe_haven.exceptions import ( + DataSafeHavenAzureAPIAuthenticationError, + DataSafeHavenAzureError, + DataSafeHavenAzureStorageError, + DataSafeHavenValueError, +) +from data_safe_haven.logging import get_logger, get_null_logger +from data_safe_haven.types import AzureSdkCredentialScope + +from 
.credentials import AzureSdkCredential +from .graph_api import GraphApi + + +class AzureSdk: + """Interface to the Azure Python SDK""" + + def __init__( + self, subscription_name: str, *, disable_logging: bool = False + ) -> None: + self._credentials: dict[AzureSdkCredentialScope, AzureSdkCredential] = {} + self.disable_logging = disable_logging + self.logger = get_null_logger() if disable_logging else get_logger() + self.subscription_name = subscription_name + self.subscription_id_: str | None = None + self.tenant_id_: str | None = None + + @property + def entra_directory(self) -> GraphApi: + return GraphApi(credential=self.credential(AzureSdkCredentialScope.GRAPH_API)) + + @property + def subscription_id(self) -> str: + if not self.subscription_id_: + self.subscription_id_ = str( + self.get_subscription(self.subscription_name).subscription_id + ) + return self.subscription_id_ + + @property + def tenant_id(self) -> str: + if not self.tenant_id_: + self.tenant_id_ = str( + self.get_subscription(self.subscription_name).tenant_id + ) + return self.tenant_id_ + + def blob_client( + self, + resource_group_name: str, + storage_account_name: str, + storage_container_name: str, + blob_name: str, + ) -> BlobClient: + """Construct a client for a blob which may exist or not""" + # Connect to Azure client + storage_account_keys = self.get_storage_account_keys( + resource_group_name, storage_account_name + ) + + # Load blob service client + blob_service_client = BlobServiceClient.from_connection_string( + f"DefaultEndpointsProtocol=https;AccountName={storage_account_name};AccountKey={storage_account_keys[0].value};EndpointSuffix=core.windows.net" + ) + if not isinstance(blob_service_client, BlobServiceClient): + msg = f"Could not connect to storage account '{storage_account_name}'." + raise DataSafeHavenAzureStorageError(msg) + + # Get the blob client + blob_client = blob_service_client.get_blob_client( + container=storage_container_name, blob=blob_name + ) + return blob_client + + def blob_exists( + self, + blob_name: str, + resource_group_name: str, + storage_account_name: str, + storage_container_name: str, + ) -> bool: + """Find out whether a blob file exists in Azure storage + + Returns: + bool: Whether or not the blob exists + """ + + if not self.storage_exists(storage_account_name): + msg = f"Storage account '{storage_account_name}' does not exist." 
+ raise DataSafeHavenAzureStorageError(msg) + try: + blob_client = self.blob_client( + resource_group_name, + storage_account_name, + storage_container_name, + blob_name, + ) + exists = bool(blob_client.exists()) + except DataSafeHavenAzureError: + exists = False + response = "exists" if exists else "does not exist" + self.logger.debug( + f"File [green]{blob_name}[/] {response} in blob storage.", + ) + return exists + + def credential( + self, scope: AzureSdkCredentialScope = AzureSdkCredentialScope.DEFAULT + ) -> AzureSdkCredential: + if scope not in self._credentials: + self._credentials[scope] = AzureSdkCredential( + scope, skip_confirmation=self.disable_logging + ) + return self._credentials[scope] + + def download_blob( + self, + blob_name: str, + resource_group_name: str, + storage_account_name: str, + storage_container_name: str, + ) -> str: + """Download a blob file from Azure storage + + Returns: + str: The contents of the blob + + Raises: + DataSafeHavenAzureError if the blob could not be downloaded + """ + try: + blob_client = self.blob_client( + resource_group_name, + storage_account_name, + storage_container_name, + blob_name, + ) + # Download the requested file + blob_content = blob_client.download_blob(encoding="utf-8").readall() + self.logger.debug( + f"Downloaded file [green]{blob_name}[/] from blob storage.", + ) + return str(blob_content) + except AzureError as exc: + msg = f"Blob file '{blob_name}' could not be downloaded from '{storage_account_name}'." + raise DataSafeHavenAzureError(msg) from exc + + def ensure_dns_caa_record( + self, + record_flags: int, + record_name: str, + record_tag: str, + record_value: str, + resource_group_name: str, + zone_name: str, + ttl: int = 30, + ) -> RecordSet: + """Ensure that a DNS CAA record exists in a DNS zone + + Returns: + RecordSet: The DNS record set + + Raises: + DataSafeHavenAzureError if the record could not be created + """ + try: + # Connect to Azure clients + dns_client = DnsManagementClient(self.credential(), self.subscription_id) + + # Ensure that record exists + self.logger.debug( + f"Ensuring that DNS CAA record [green]{record_name}[/] exists in zone [bold]{zone_name}[/]...", + ) + record_set = dns_client.record_sets.create_or_update( + parameters=RecordSet( + ttl=ttl, + caa_records=[ + CaaRecord( + flags=record_flags, tag=record_tag, value=record_value + ) + ], + ), + record_type=RecordType.CAA, + relative_record_set_name=record_name, + resource_group_name=resource_group_name, + zone_name=zone_name, + ) + self.logger.info( + f"Ensured that DNS CAA record [green]{record_name}[/] exists in zone [bold]{zone_name}[/].", + ) + return record_set + except AzureError as exc: + msg = f"Failed to create DNS CAA record {record_name} in zone {zone_name}.\n{exc}" + raise DataSafeHavenAzureError(msg) from exc + + def ensure_dns_txt_record( + self, + record_name: str, + record_value: str, + resource_group_name: str, + zone_name: str, + ttl: int = 30, + ) -> RecordSet: + """Ensure that a DNS TXT record exists in a DNS zone + + Returns: + RecordSet: The DNS record set + + Raises: + DataSafeHavenAzureError if the record could not be created + """ + try: + # Connect to Azure clients + dns_client = DnsManagementClient(self.credential(), self.subscription_id) + + # Ensure that record exists + self.logger.debug( + f"Ensuring that DNS TXT record [green]{record_name}[/] exists in zone [bold]{zone_name}[/]...", + ) + record_set = dns_client.record_sets.create_or_update( + parameters=RecordSet( + ttl=ttl, 
txt_records=[TxtRecord(value=[record_value])] + ), + record_type=RecordType.TXT, + relative_record_set_name=record_name, + resource_group_name=resource_group_name, + zone_name=zone_name, + ) + self.logger.info( + f"Ensured that DNS TXT record [green]{record_name}[/] exists in zone [bold]{zone_name}[/].", + ) + return record_set + except AzureError as exc: + msg = f"Failed to create DNS TXT record {record_name} in zone {zone_name}." + raise DataSafeHavenAzureError(msg) from exc + + def ensure_dns_zone( + self, + resource_group_name: str, + zone_name: str, + tags: Any = None, + ) -> Zone: + """Ensure that a DNS zone exists + + Returns: + Zone: The DNS zone + + Raises: + DataSafeHavenAzureError if the zone could not be created + """ + try: + # Connect to Azure clients + dns_client = DnsManagementClient(self.credential(), self.subscription_id) + + # Ensure that record exists + self.logger.debug( + f"Ensuring that DNS zone {zone_name} exists...", + ) + zone = dns_client.zones.create_or_update( + parameters=Zone( + location="Global", + tags=tags, + zone_type=ZoneType.PUBLIC, + ), + resource_group_name=resource_group_name, + zone_name=zone_name, + ) + self.logger.info( + f"Ensured that DNS zone [green]{zone_name}[/] exists.", + ) + return zone + except AzureError as exc: + msg = f"Failed to create DNS zone {zone_name}.\n{exc}" + raise DataSafeHavenAzureError(msg) from exc + + def ensure_keyvault( + self, + admin_group_id: str, + key_vault_name: str, + location: str, + managed_identity: Identity, + resource_group_name: str, + tags: Any = None, + tenant_id: str | None = None, + ) -> Vault: + """Ensure that a KeyVault exists + + Raises: + DataSafeHavenAzureError if the existence of the KeyVault could not be verified + """ + try: + self.logger.debug( + f"Ensuring that key vault [green]{key_vault_name}[/] exists...", + ) + tenant_id = tenant_id if tenant_id else self.tenant_id + + # Connect to Azure clients + key_vault_client = KeyVaultManagementClient( + self.credential(), self.subscription_id + ) + # Ensure that key vault exists + key_vault_client.vaults.begin_create_or_update( + resource_group_name, + key_vault_name, + VaultCreateOrUpdateParameters( + location=location, + tags=tags, + properties=VaultProperties( + tenant_id=tenant_id, + sku=KeyVaultSku(name="standard", family="A"), + access_policies=[ + AccessPolicyEntry( + tenant_id=tenant_id, + object_id=admin_group_id, + permissions=Permissions( + keys=[ + "GET", + "LIST", + "CREATE", + "DECRYPT", + "ENCRYPT", + ], + secrets=["GET", "LIST", "SET"], + certificates=["GET", "LIST", "CREATE"], + ), + ), + AccessPolicyEntry( + tenant_id=tenant_id, + object_id=str(managed_identity.principal_id), + permissions=Permissions( + secrets=["GET", "LIST"], + certificates=["GET", "LIST"], + ), + ), + ], + ), + ), + ) + # Cast to correct spurious type hint in Azure libraries + key_vaults = [ + kv + for kv in cast(list[Vault], key_vault_client.vaults.list()) + if kv.name == key_vault_name + ] + self.logger.info( + f"Ensured that key vault [green]{key_vaults[0].name}[/] exists.", + ) + return key_vaults[0] + except AzureError as exc: + msg = f"Failed to create key vault {key_vault_name}." 
+ raise DataSafeHavenAzureError(msg) from exc + + def ensure_keyvault_key( + self, + key_name: str, + key_vault_name: str, + ) -> KeyVaultKey: + """Ensure that a key exists in the KeyVault + + Returns: + str: The key ID + + Raises: + DataSafeHavenAzureError if the existence of the key could not be verified + """ + try: + # Connect to Azure clients + key_client = KeyClient( + credential=self.credential(AzureSdkCredentialScope.KEY_VAULT), + vault_url=f"https://{key_vault_name}.vault.azure.net", + ) + + # Ensure that key exists + self.logger.debug(f"Ensuring that key [green]{key_name}[/] exists...") + key = None + try: + key = key_client.get_key(key_name) + except (HttpResponseError, ResourceNotFoundError): + key_client.create_rsa_key(key_name, size=2048) + key = key_client.get_key(key_name) + self.logger.info( + f"Ensured that key [green]{key_name}[/] exists.", + ) + return key + except AzureError as exc: + msg = f"Failed to create key {key_name}." + raise DataSafeHavenAzureError(msg) from exc + + def ensure_managed_identity( + self, + identity_name: str, + location: str, + resource_group_name: str, + ) -> Identity: + """Ensure that a managed identity exists + + Returns: + Identity: The managed identity + + Raises: + DataSafeHavenAzureError if the existence of the managed identity could not be verified + """ + try: + self.logger.debug( + f"Ensuring that managed identity [green]{identity_name}[/] exists...", + ) + msi_client = ManagedServiceIdentityClient( + self.credential(), self.subscription_id + ) + managed_identity = msi_client.user_assigned_identities.create_or_update( + resource_group_name, + identity_name, + Identity(location=location), + ) + self.logger.info( + f"Ensured that managed identity [green]{identity_name}[/] exists.", + ) + return managed_identity + except AzureError as exc: + msg = f"Failed to create managed identity {identity_name}." + raise DataSafeHavenAzureError(msg) from exc + + def ensure_resource_group( + self, + location: str, + resource_group_name: str, + tags: Any = None, + ) -> ResourceGroup: + """Ensure that a resource group exists + + Raises: + DataSafeHavenAzureError if the existence of the resource group could not be verified + """ + try: + # Connect to Azure clients + resource_client = ResourceManagementClient( + self.credential(), self.subscription_id + ) + + # Ensure that resource group exists + self.logger.debug( + f"Ensuring that resource group [green]{resource_group_name}[/] exists...", + ) + resource_client.resource_groups.create_or_update( + resource_group_name, + ResourceGroup(location=location, tags=tags), + ) + # Cast to correct spurious type hint in Azure libraries + resource_groups = [ + rg + for rg in cast( + list[ResourceGroup], resource_client.resource_groups.list() + ) + if rg.name == resource_group_name + ] + self.logger.info( + f"Ensured that resource group [green]{resource_groups[0].name}[/] exists" + f" in [green]{resource_groups[0].location}[/].", + ) + return resource_groups[0] + except AzureError as exc: + msg = f"Failed to create resource group {resource_group_name}." 
+ raise DataSafeHavenAzureError(msg) from exc + + def ensure_storage_account( + self, + location: str, + resource_group_name: str, + storage_account_name: str, + tags: Any = None, + ) -> StorageAccount: + """Ensure that a storage account exists + + Returns: + str: The certificate secret ID + + Raises: + DataSafeHavenAzureError if the existence of the certificate could not be verified + """ + try: + # Connect to Azure clients + storage_client = StorageManagementClient( + self.credential(), self.subscription_id + ) + self.logger.debug( + f"Ensuring that storage account [green]{storage_account_name}[/] exists...", + ) + poller = storage_client.storage_accounts.begin_create( + resource_group_name, + storage_account_name, + StorageAccountCreateParameters( + sku=StorageAccountSku(name="Standard_LRS"), + kind=StorageAccountKind.STORAGE_V2, + location=location, + tags=tags, + ), + ) + storage_account = poller.result() + self.logger.info( + f"Ensured that storage account [green]{storage_account.name}[/] exists.", + ) + return storage_account + except AzureError as exc: + msg = f"Failed to create storage account {storage_account_name}." + raise DataSafeHavenAzureStorageError(msg) from exc + + def ensure_storage_blob_container( + self, + container_name: str, + resource_group_name: str, + storage_account_name: str, + ) -> BlobContainer: + """Ensure that a storage blob container exists + + Returns: + str: The certificate secret ID + + Raises: + DataSafeHavenAzureError if the existence of the certificate could not be verified + """ + # Connect to Azure clients + storage_client = StorageManagementClient( + self.credential(), self.subscription_id + ) + + self.logger.debug( + f"Ensuring that storage container [green]{container_name}[/] exists...", + ) + try: + container = storage_client.blob_containers.create( + resource_group_name, + storage_account_name, + container_name, + BlobContainer(public_access=PublicAccess.NONE), + ) + self.logger.info( + f"Ensured that storage container [green]{container.name}[/] exists.", + ) + return container + except HttpResponseError as exc: + msg = f"Failed to create storage container '{container_name}'." + raise DataSafeHavenAzureStorageError(msg) from exc + + def get_keyvault_certificate( + self, certificate_name: str, key_vault_name: str + ) -> KeyVaultCertificate: + """Read a certificate from the KeyVault + + Returns: + KeyVaultCertificate: The certificate + + Raises: + DataSafeHavenAzureError if the secret could not be read + """ + # Connect to Azure clients + certificate_client = CertificateClient( + credential=self.credential(AzureSdkCredentialScope.KEY_VAULT), + vault_url=f"https://{key_vault_name}.vault.azure.net", + ) + # Ensure that certificate exists + try: + return certificate_client.get_certificate(certificate_name) + except AzureError as exc: + msg = f"Failed to retrieve certificate {certificate_name}." + raise DataSafeHavenAzureError(msg) from exc + + def get_keyvault_key(self, key_name: str, key_vault_name: str) -> KeyVaultKey: + """Read a key from the KeyVault + + Returns: + KeyVaultKey: The key + + Raises: + DataSafeHavenAzureError if the secret could not be read + """ + # Connect to Azure clients + key_client = KeyClient( + credential=self.credential(AzureSdkCredentialScope.KEY_VAULT), + vault_url=f"https://{key_vault_name}.vault.azure.net", + ) + # Ensure that certificate exists + try: + return key_client.get_key(key_name) + except (ResourceNotFoundError, HttpResponseError) as exc: + msg = f"Failed to retrieve key {key_name}." 
+ raise DataSafeHavenAzureError(msg) from exc + + def get_keyvault_secret(self, key_vault_name: str, secret_name: str) -> str: + """Read a secret from the KeyVault + + Returns: + str: The secret value + + Raises: + DataSafeHavenAzureError if the secret could not be read + """ + # Connect to Azure clients + secret_client = SecretClient( + credential=self.credential(AzureSdkCredentialScope.KEY_VAULT), + vault_url=f"https://{key_vault_name}.vault.azure.net", + ) + # Ensure that secret exists + try: + secret = secret_client.get_secret(secret_name) + if secret.value: + return str(secret.value) + msg = f"Secret {secret_name} has no value." + raise DataSafeHavenAzureError(msg) + except AzureError as exc: + msg = f"Failed to retrieve secret {secret_name}." + raise DataSafeHavenAzureError(msg) from exc + + def get_locations(self) -> list[str]: + """Retrieve list of Azure locations + + Returns: + List[str]: Names of Azure locations + """ + try: + subscription_client = SubscriptionClient(self.credential()) + return [ + str(location.name) + for location in cast( + list[Location], + subscription_client.subscriptions.list_locations( + subscription_id=self.subscription_id + ), + ) + ] + except AzureError as exc: + msg = "Azure locations could not be loaded." + raise DataSafeHavenAzureError(msg) from exc + + def get_storage_account_keys( + self, resource_group_name: str, storage_account_name: str, *, attempts: int = 3 + ) -> list[StorageAccountKey]: + """Retrieve the storage account keys for an existing storage account + + Returns: + List[StorageAccountKey]: The keys for this storage account + + Raises: + DataSafeHavenAzureError if the keys could not be loaded + """ + msg_sa = f"storage account '{storage_account_name}'" + msg_rg = f"resource group '{resource_group_name}'" + try: + # Connect to Azure client + storage_client = StorageManagementClient( + self.credential(), self.subscription_id + ) + storage_keys = None + for _ in range(attempts): + with suppress(HttpResponseError): + storage_keys = storage_client.storage_accounts.list_keys( + resource_group_name, + storage_account_name, + ) + if storage_keys: + break + time.sleep(5) + if not isinstance(storage_keys, StorageAccountListKeysResult): + msg = f"Could not connect to {msg_sa} in {msg_rg}." + raise DataSafeHavenAzureStorageError(msg) + keys = cast(list[StorageAccountKey], storage_keys.keys) + if not keys or not isinstance(keys, list) or len(keys) == 0: + msg = f"No keys were retrieved for {msg_sa} in {msg_rg}." + raise DataSafeHavenAzureStorageError(msg) + return keys + except AzureError as exc: + msg = f"Keys could not be loaded for {msg_sa} in {msg_rg}." + raise DataSafeHavenAzureStorageError(msg) from exc + + def get_subscription(self, subscription_name: str) -> Subscription: + """Get an Azure subscription by name.""" + try: + subscription_client = SubscriptionClient(self.credential()) + for subscription in subscription_client.subscriptions.list(): + if subscription.display_name == subscription_name: + return subscription + except ClientAuthenticationError as exc: + msg = "Failed to authenticate with Azure API." 
+ raise DataSafeHavenAzureAPIAuthenticationError(msg) from exc + msg = f"Could not find subscription '{subscription_name}'" + raise DataSafeHavenValueError(msg) + + def import_keyvault_certificate( + self, + certificate_name: str, + certificate_contents: bytes, + key_vault_name: str, + ) -> KeyVaultCertificate: + """Import a signed certificate to in the KeyVault + + Returns: + KeyVaultCertificate: The imported certificate + + Raises: + DataSafeHavenAzureError if the existence of the certificate could not be verified + """ + try: + # Connect to Azure clients + certificate_client = CertificateClient( + credential=self.credential(AzureSdkCredentialScope.KEY_VAULT), + vault_url=f"https://{key_vault_name}.vault.azure.net", + ) + # Import the certificate, overwriting any existing certificate with the same name + self.logger.debug( + f"Importing certificate [green]{certificate_name}[/]...", + ) + while True: + try: + # Attempt to import this certificate into the keyvault + certificate = certificate_client.import_certificate( + certificate_name=certificate_name, + certificate_bytes=certificate_contents, + enabled=True, + ) + break + except ResourceExistsError: + # Purge any existing deleted certificate with the same name + self.purge_keyvault_certificate(certificate_name, key_vault_name) + self.logger.info( + f"Imported certificate [green]{certificate_name}[/].", + ) + return certificate + except AzureError as exc: + msg = f"Failed to import certificate '{certificate_name}'." + raise DataSafeHavenAzureError(msg) from exc + + def list_available_vm_skus(self, location: str) -> dict[str, dict[str, Any]]: + try: + # Connect to Azure client + compute_client = ComputeManagementClient( + self.credential(), self.subscription_id + ) + # Construct SKU information + skus = {} + for resource_sku in compute_client.resource_skus.list(): + if ( + resource_sku.locations + and (location in resource_sku.locations) + and (resource_sku.resource_type == "virtualMachines") + ): + skus[resource_sku.name] = { + "GPUs": 0 + } # default to 0 GPUs, overriding if appropriate + if resource_sku.capabilities: + # Cast to correct spurious type hint in Azure libraries + for capability in cast( + list[ResourceSkuCapabilities], resource_sku.capabilities + ): + skus[resource_sku.name][capability.name] = capability.value + return skus + except AzureError as exc: + msg = f"Failed to load available VM sizes for Azure location {location}." + raise DataSafeHavenAzureError(msg) from exc + + def purge_keyvault( + self, + key_vault_name: str, + location: str, + ) -> bool: + """Purge a deleted Key Vault from Azure + + Returns: + True: if the Key Vault was purged from a deleted state + False: if the Key Vault did not need to be purged + + Raises: + DataSafeHavenAzureError if the non-existence of the Key Vault could not be verified + """ + try: + # Connect to Azure clients + key_vault_client = KeyVaultManagementClient( + self.credential(), self.subscription_id + ) + + # Check whether a deleted Key Vault exists + try: + key_vault_client.vaults.get_deleted( + vault_name=key_vault_name, + location=location, + ) + except HttpResponseError: + self.logger.info( + f"Key Vault [green]{key_vault_name}[/] does not need to be purged." 
+ ) + return False + + # Purge the Key Vault + with suppress(HttpResponseError): + self.logger.debug( + f"Purging Key Vault [green]{key_vault_name}[/]...", + ) + + # Keep polling until purge is finished + poller = key_vault_client.vaults.begin_purge_deleted( + vault_name=key_vault_name, + location=location, + ) + while not poller.done(): + poller.wait(10) + + # Check whether the Key Vault is still in deleted state + with suppress(HttpResponseError): + if key_vault_client.vaults.get_deleted( + vault_name=key_vault_name, + location=location, + ): + msg = f"Key Vault '{key_vault_name}' exists in deleted state." + raise AzureError(msg) + self.logger.info(f"Purged Key Vault [green]{key_vault_name}[/].") + return True + except AzureError as exc: + msg = f"Failed to remove Key Vault '{key_vault_name}'." + raise DataSafeHavenAzureError(msg) from exc + + def purge_keyvault_certificate( + self, + certificate_name: str, + key_vault_name: str, + ) -> None: + """Purge a deleted certificate from the KeyVault + + Raises: + DataSafeHavenAzureError if the non-existence of the certificate could not be verified + """ + try: + # Connect to Azure clients + certificate_client = CertificateClient( + credential=self.credential(AzureSdkCredentialScope.KEY_VAULT), + vault_url=f"https://{key_vault_name}.vault.azure.net", + ) + # Ensure that record is removed + self.logger.debug( + f"Purging certificate [green]{certificate_name}[/] from Key Vault [green]{key_vault_name}[/]...", + ) + # Purge the certificate + with suppress(HttpResponseError): + certificate_client.purge_deleted_certificate(certificate_name) + # Wait until certificate no longer exists + while True: + try: + time.sleep(10) + certificate_client.get_deleted_certificate(certificate_name) + except ResourceNotFoundError: + break + self.logger.info( + f"Purged certificate [green]{certificate_name}[/] from Key Vault [green]{key_vault_name}[/].", + ) + except AzureError as exc: + msg = f"Failed to remove certificate '{certificate_name}' from Key Vault '{key_vault_name}'." + raise DataSafeHavenAzureError(msg) from exc + + def remove_blob( + self, + blob_name: str, + resource_group_name: str, + storage_account_name: str, + storage_container_name: str, + ) -> None: + """Remove a file from Azure blob storage + + Returns: + None + + Raises: + DataSafeHavenAzureError if the blob could not be removed + """ + try: + # Connect to Azure client + storage_account_keys = self.get_storage_account_keys( + resource_group_name, storage_account_name + ) + blob_service_client = BlobServiceClient.from_connection_string( + f"DefaultEndpointsProtocol=https;AccountName={storage_account_name};AccountKey={storage_account_keys[0].value};EndpointSuffix=core.windows.net" + ) + if not isinstance(blob_service_client, BlobServiceClient): + msg = f"Could not connect to storage account '{storage_account_name}'." + raise DataSafeHavenAzureStorageError(msg) + # Remove the requested blob + blob_client = blob_service_client.get_blob_client( + container=storage_container_name, blob=blob_name + ) + blob_client.delete_blob(delete_snapshots="include") + self.logger.info( + f"Removed file [green]{blob_name}[/] from blob storage.", + ) + except AzureError as exc: + msg = f"Blob file '{blob_name}' could not be removed from '{storage_account_name}'." 
+ raise DataSafeHavenAzureError(msg) from exc + + def remove_dns_txt_record( + self, + record_name: str, + resource_group_name: str, + zone_name: str, + ) -> None: + """Remove a DNS record if it exists in a DNS zone + + Raises: + DataSafeHavenAzureError if the record could not be removed + """ + try: + # Connect to Azure clients + dns_client = DnsManagementClient(self.credential(), self.subscription_id) + # Check whether resource currently exists + try: + dns_client.record_sets.get( + record_type=RecordType.TXT, + relative_record_set_name=record_name, + resource_group_name=resource_group_name, + zone_name=zone_name, + ) + except ResourceNotFoundError: + self.logger.warning( + f"DNS record [green]{record_name}[/] does not exist in zone [green]{zone_name}[/].", + ) + return + # Ensure that record is removed + self.logger.debug( + f"Ensuring that DNS record [green]{record_name}[/] is removed from zone [green]{zone_name}[/]...", + ) + dns_client.record_sets.delete( + record_type=RecordType.TXT, + relative_record_set_name=record_name, + resource_group_name=resource_group_name, + zone_name=zone_name, + ) + self.logger.info( + f"Ensured that DNS record [green]{record_name}[/] is removed from zone [green]{zone_name}[/].", + ) + except AzureError as exc: + msg = f"Failed to remove DNS record {record_name} from zone {zone_name}." + raise DataSafeHavenAzureError(msg) from exc + + def remove_keyvault_certificate( + self, + certificate_name: str, + key_vault_name: str, + ) -> None: + """Remove a certificate from the KeyVault + + Raises: + DataSafeHavenAzureError if the existence of the certificate could not be verified + """ + try: + # Connect to Azure clients + certificate_client = CertificateClient( + credential=self.credential(AzureSdkCredentialScope.KEY_VAULT), + vault_url=f"https://{key_vault_name}.vault.azure.net", + ) + self.logger.debug( + f"Removing certificate [green]{certificate_name}[/] from Key Vault [green]{key_vault_name}[/]...", + ) + + # Start by attempting to delete + # This might fail if the certificate does not exist or was already deleted + self.logger.debug( + f"Attempting to delete certificate [green]{certificate_name}[/]..." + ) + with suppress(ResourceNotFoundError, ServiceRequestError): + # Keep polling until deletion is finished + poller = certificate_client.begin_delete_certificate(certificate_name) + while not poller.done(): + poller.wait(10) + + # Wait until the certificate shows up as deleted + self.logger.debug( + f"Waiting for deletion to complete for certificate [green]{certificate_name}[/]..." + ) + while True: + # Keep polling until deleted certificate is available + with suppress(ResourceNotFoundError): + if certificate_client.get_deleted_certificate(certificate_name): + break + time.sleep(10) + + # Now attempt to remove a certificate that has been deleted but not purged + self.logger.debug( + f"Attempting to purge certificate [green]{certificate_name}[/]..." + ) + with suppress(ResourceNotFoundError, ServiceRequestError): + certificate_client.purge_deleted_certificate(certificate_name) + + # Now check whether the certificate still exists + self.logger.debug( + f"Checking for existence of certificate [green]{certificate_name}[/]..." + ) + with suppress(ResourceNotFoundError, ServiceRequestError): + certificate_client.get_certificate(certificate_name) + msg = f"Certificate '{certificate_name}' is still in Key Vault '{key_vault_name}' despite deletion." 
+ raise DataSafeHavenAzureError(msg) + + self.logger.info( + f"Removed certificate [green]{certificate_name}[/] from Key Vault [green]{key_vault_name}[/].", + ) + except AzureError as exc: + msg = f"Failed to remove certificate '{certificate_name}' from Key Vault '{key_vault_name}'." + raise DataSafeHavenAzureError(msg) from exc + + def remove_resource_group(self, resource_group_name: str) -> None: + """Remove a resource group with its contents + + Raises: + DataSafeHavenAzureError if the resource group could not be removed + """ + try: + # Connect to Azure clients + resource_client = ResourceManagementClient( + self.credential(), self.subscription_id + ) + + if not resource_client.resource_groups.check_existence(resource_group_name): + self.logger.warning( + f"Resource group [green]{resource_group_name}[/] does not exist.", + ) + return + # Ensure that resource group exists + self.logger.debug( + f"Attempting to remove resource group [green]{resource_group_name}[/]", + ) + poller = resource_client.resource_groups.begin_delete( + resource_group_name, + ) + while not poller.done(): + poller.wait(10) + # Cast to correct spurious type hint in Azure libraries + resource_groups = [ + rg + for rg in cast( + list[ResourceGroup], resource_client.resource_groups.list() + ) + if rg.name == resource_group_name + ] + if resource_groups: + msg = f"There are still {len(resource_groups)} resource group(s) remaining." + raise DataSafeHavenAzureError(msg) + self.logger.info( + f"Ensured that resource group [green]{resource_group_name}[/] does not exist.", + ) + except AzureError as exc: + msg = f"Failed to remove resource group {resource_group_name}." + raise DataSafeHavenAzureError(msg) from exc + + def run_remote_script( + self, + resource_group_name: str, + script: str, + script_parameters: dict[str, str], + vm_name: str, + ) -> str: + """Run a script on a remote virtual machine + + Returns: + str: The script output + + Raises: + DataSafeHavenAzureError if running the script failed + """ + try: + # Connect to Azure clients + compute_client = ComputeManagementClient( + self.credential(), self.subscription_id + ) + vm = compute_client.virtual_machines.get(resource_group_name, vm_name) + if not vm.os_profile: + msg = f"No OSProfile available for VM {vm_name}" + raise ValueError(msg) + command_id = ( + "RunPowerShellScript" + if ( + vm.os_profile.windows_configuration + and not vm.os_profile.linux_configuration + ) + else "RunShellScript" + ) + run_command_parameters = RunCommandInput( + command_id=command_id, + script=list(script.split("\n")), + parameters=[ + RunCommandInputParameter(name=name, value=value) + for name, value in script_parameters.items() + ], + ) + # Run the command and wait until finished + poller = compute_client.virtual_machines.begin_run_command( + resource_group_name, vm_name, run_command_parameters + ) + # Cast to correct spurious type hint in Azure libraries + result = cast(RunCommandResult, poller.result()) + # Return any stdout/stderr from the command + return str(result.value[0].message) if result.value else "" + except AzureError as exc: + msg = f"Failed to run command on '{vm_name}'." 
+ raise DataSafeHavenAzureError(msg) from exc + + def run_remote_script_waiting( + self, + resource_group_name: str, + script: str, + script_parameters: dict[str, str], + vm_name: str, + ) -> str: + """Run a script on a remote virtual machine waiting for other scripts to complete + + Returns: + str: The script output + + Raises: + DataSafeHavenAzureError if running the script failed + """ + while True: + try: + script_output = self.run_remote_script( + resource_group_name=resource_group_name, + script=script, + script_parameters=script_parameters, + vm_name=vm_name, + ) + break + except AzureError as exc: + if all( + reason not in str(exc) + for reason in ( + "The request failed due to conflict with a concurrent request", + "Run command extension execution is in progress", + ) + ): + raise + time.sleep(5) + return script_output + + def set_blob_container_acl( + self, + container_name: str, + desired_acl: str, + resource_group_name: str, + storage_account_name: str, + ) -> None: + """Set the ACL for a blob container + + Raises: + DataSafeHavenAzureError if the ACL could not be set + """ + try: + # Ensure that storage container exists in the storage account + storage_client = StorageManagementClient( + self.credential(), self.subscription_id + ) + try: + container = storage_client.blob_containers.get( + resource_group_name, storage_account_name, container_name + ) + if container.name != container_name: + msg = f"Container '{container_name}' could not be found." + raise HttpResponseError(msg) + except HttpResponseError: + self.logger.warning( + f"Blob container '[green]{container_name}[/]' could not be found" + f" in storage account '[green]{storage_account_name}[/]'." + ) + return + + # Connect to Azure clients + service_client = DataLakeServiceClient( + account_url=f"https://{storage_account_name}.dfs.core.windows.net", + credential=self.credential(), + ) + file_system_client = service_client.get_file_system_client( + file_system=container_name + ) + directory_client = file_system_client._get_root_directory_client() + # Set the desired ACL + directory_client.set_access_control_recursive(acl=desired_acl) + except AzureError as exc: + msg = f"Failed to set ACL '{desired_acl}' on container '{container_name}'." + raise DataSafeHavenAzureError(msg) from exc + + def storage_exists( + self, + storage_account_name: str, + ) -> bool: + """Find out whether a named storage account exists in the Azure subscription + + Returns: + bool: Whether or not the storage account exists + """ + + storage_client = StorageManagementClient( + self.credential(), self.subscription_id + ) + storage_account_names = {s.name for s in storage_client.storage_accounts.list()} + return storage_account_name in storage_account_names + + def upload_blob( + self, + blob_data: bytes | str, + blob_name: str, + resource_group_name: str, + storage_account_name: str, + storage_container_name: str, + ) -> None: + """Upload a file to Azure blob storage + + Returns: + None + + Raises: + DataSafeHavenAzureError if the blob could not be uploaded + """ + try: + blob_client = self.blob_client( + resource_group_name, + storage_account_name, + storage_container_name, + blob_name, + ) + # Upload the created file + blob_client.upload_blob(blob_data, overwrite=True) + self.logger.debug( + f"Uploaded file [green]{blob_name}[/] to blob storage.", + ) + except AzureError as exc: + msg = f"Blob file '{blob_name}' could not be uploaded to '{storage_account_name}'." 
+ raise DataSafeHavenAzureError(msg) from exc diff --git a/data_safe_haven/external/api/credentials.py b/data_safe_haven/external/api/credentials.py new file mode 100644 index 0000000000..e029a6335b --- /dev/null +++ b/data_safe_haven/external/api/credentials.py @@ -0,0 +1,220 @@ +"""Classes related to Azure credentials""" + +from abc import abstractmethod +from collections.abc import Sequence +from datetime import UTC, datetime +from typing import Any, ClassVar + +import jwt +from azure.core.credentials import AccessToken, TokenCredential +from azure.identity import ( + AuthenticationRecord, + AzureCliCredential, + CredentialUnavailableError, + DeviceCodeCredential, + TokenCachePersistenceOptions, +) + +from data_safe_haven import console +from data_safe_haven.directories import config_dir +from data_safe_haven.exceptions import DataSafeHavenAzureError, DataSafeHavenValueError +from data_safe_haven.logging import get_logger +from data_safe_haven.types import AzureSdkCredentialScope + + +class DeferredCredential(TokenCredential): + """A token credential that wraps and caches other credential classes.""" + + tokens_: ClassVar[dict[str, AccessToken]] = {} + cache_: ClassVar[set[tuple[str, str]]] = set() + + def __init__( + self, + *, + scopes: Sequence[str], + skip_confirmation: bool, + tenant_id: str | None = None, + ) -> None: + self.skip_confirmation = skip_confirmation + self.logger = get_logger() + self.scopes = scopes + self.tenant_id = tenant_id + + @property + def token(self) -> str: + """Get a token from the credential provider.""" + return str(self.get_token(*self.scopes, tenant_id=self.tenant_id).token) + + @classmethod + def decode_token(cls, auth_token: str) -> dict[str, Any]: + try: + return dict( + jwt.decode( + auth_token, + algorithms=["RS256"], + options={"verify_signature": False}, + ) + ) + except (jwt.exceptions.DecodeError, KeyError) as exc: + msg = "Could not interpret input as an Azure authentication token." + raise DataSafeHavenValueError(msg) from exc + + @abstractmethod + def get_credential(self) -> TokenCredential: + """Get a credential provider from the child class.""" + + def confirm_credentials_interactive( + self, + target_name: str, + user_name: str, + user_id: str, + tenant_name: str, + tenant_id: str, + ) -> None: + """ + Allow user to confirm that credentials are correct. + + Responses are cached so the user will only be prompted once per run. + If 'skip_confirmation' is set, then no confirmation will be performed. + + Raises: + DataSafeHavenValueError: if the user indicates that the credentials are wrong + """ + if self.skip_confirmation: + return + if (user_id, tenant_id) in DeferredCredential.cache_: + return + DeferredCredential.cache_.add((user_id, tenant_id)) + self.logger.info(f"You are logged into the [blue]{target_name}[/] as:") + self.logger.info(f"\tuser: [green]{user_name}[/] ({user_id})") + self.logger.info(f"\ttenant: [green]{tenant_name}[/] ({tenant_id})") + if not console.confirm("Are these details correct?", default_to_yes=True): + msg = "Selected credentials are incorrect." 
+ raise DataSafeHavenValueError(msg) + + def get_token( + self, + *scopes: str, + **kwargs: Any, + ) -> AccessToken: + combined_scopes = " ".join(scopes) + # Require at least 10 minutes of remaining validity + # The 'expires_on' property is a Unix timestamp integer in seconds + validity_cutoff = datetime.now(tz=UTC).timestamp() + 10 * 60 + if not DeferredCredential.tokens_.get(combined_scopes, None) or ( + DeferredCredential.tokens_[combined_scopes].expires_on < validity_cutoff + ): + # Generate a new token and store it at class-level token + DeferredCredential.tokens_[combined_scopes] = ( + self.get_credential().get_token(*scopes, **kwargs) + ) + return DeferredCredential.tokens_[combined_scopes] + + +class AzureSdkCredential(DeferredCredential): + """ + Credential loader used by AzureSdk + + Uses AzureCliCredential for authentication + """ + + def __init__( + self, + scope: AzureSdkCredentialScope = AzureSdkCredentialScope.DEFAULT, + *, + skip_confirmation: bool = False, + ) -> None: + super().__init__(scopes=[scope.value], skip_confirmation=skip_confirmation) + + def get_credential(self) -> TokenCredential: + """Get a new AzureCliCredential.""" + credential = AzureCliCredential(additionally_allowed_tenants=["*"]) + # Confirm that these are the desired credentials + try: + decoded = self.decode_token(credential.get_token(*self.scopes).token) + self.confirm_credentials_interactive( + "Azure CLI", + user_name=decoded["name"], + user_id=decoded["oid"], + tenant_name=decoded["upn"].split("@")[1], + tenant_id=decoded["tid"], + ) + except (CredentialUnavailableError, DataSafeHavenValueError) as exc: + self.logger.error( + "Please authenticate with Azure: run '[green]az login[/]' using [bold]infrastructure administrator[/] credentials." + ) + msg = "Error getting account information from Azure CLI." + raise DataSafeHavenAzureError(msg) from exc + return credential + + +class GraphApiCredential(DeferredCredential): + """ + Credential loader used by GraphApi + + Uses DeviceCodeCredential for authentication + """ + + def __init__( + self, + tenant_id: str, + *, + scopes: Sequence[str] = [], + skip_confirmation: bool = False, + ) -> None: + super().__init__( + scopes=scopes, tenant_id=tenant_id, skip_confirmation=skip_confirmation + ) + + def get_credential(self) -> TokenCredential: + """Get a new DeviceCodeCredential, using cached credentials if they are available""" + cache_name = f"dsh-{self.tenant_id}" + authentication_record_path = ( + config_dir() / f".msal-authentication-cache-{cache_name}" + ) + + # Read an existing authentication record, using default arguments if unavailable + kwargs = {} + if authentication_record_path.is_file(): + with open(authentication_record_path) as f_auth: + existing_auth_record = AuthenticationRecord.deserialize(f_auth.read()) + kwargs["authentication_record"] = existing_auth_record + else: + kwargs["authority"] = "https://login.microsoftonline.com/" + # Use the Microsoft Graph Command Line Tools client ID + kwargs["client_id"] = "14d82eec-204b-4c2f-b7e8-296a70dab67e" + kwargs["tenant_id"] = self.tenant_id + + # Get a credential with a custom callback + def callback(verification_uri: str, user_code: str, _: datetime) -> None: + self.logger.info( + f"Go to [bold]{verification_uri}[/] in a web browser and enter the code [bold]{user_code}[/] at the prompt." + ) + self.logger.info( + "Use [bold]global administrator credentials[/] for your [blue]Entra ID directory[/] to sign-in." 
+ ) + + credential = DeviceCodeCredential( + cache_persistence_options=TokenCachePersistenceOptions( + name=cache_name, allow_unencrypted_storage=True + ), + prompt_callback=callback, + **kwargs, + ) + + # Write out an authentication record for this credential + new_auth_record = credential.authenticate(scopes=self.scopes) + with open(authentication_record_path, "w") as f_auth: + f_auth.write(new_auth_record.serialize()) + + # Confirm that these are the desired credentials + self.confirm_credentials_interactive( + "Microsoft Graph API", + user_name=new_auth_record.username, + user_id=new_auth_record._home_account_id.split(".")[0], + tenant_name=new_auth_record._username.split("@")[1], + tenant_id=new_auth_record._tenant_id, + ) + + # Return the credential + return credential diff --git a/data_safe_haven/external/api/graph_api.py b/data_safe_haven/external/api/graph_api.py new file mode 100644 index 0000000000..ce87648fa9 --- /dev/null +++ b/data_safe_haven/external/api/graph_api.py @@ -0,0 +1,1102 @@ +"""Interface to the Microsoft Graph API""" + +import datetime +import json +import time +from collections.abc import Sequence +from contextlib import suppress +from typing import Any, ClassVar, Self + +import requests +import typer +from dns import resolver + +from data_safe_haven import console +from data_safe_haven.exceptions import ( + DataSafeHavenMicrosoftGraphError, + DataSafeHavenValueError, +) +from data_safe_haven.functions import alphanumeric +from data_safe_haven.logging import get_logger, get_null_logger + +from .credentials import DeferredCredential, GraphApiCredential + + +class GraphApi: + """Interface to the Microsoft Graph REST API""" + + application_ids: ClassVar[dict[str, str]] = { + "Microsoft Graph": "00000003-0000-0000-c000-000000000000", + } + role_template_ids: ClassVar[dict[str, str]] = { + "Global Administrator": "62e90394-69f5-4237-9190-012177145e10" + } + uuid_application: ClassVar[dict[str, str]] = { + "Application.ReadWrite.All": "1bfefb4e-e0b5-418b-a88f-73c46d2cc8e9", + "AppRoleAssignment.ReadWrite.All": "06b708a9-e830-4db3-a914-8e69da51d44f", + "Directory.Read.All": "7ab1d382-f21e-4acd-a863-ba3e13f7da61", + "Domain.Read.All": "dbb9058a-0e50-45d7-ae91-66909b5d4664", + "Group.Read.All": "5b567255-7703-4780-807c-7be8301ae99b", + "Group.ReadWrite.All": "62a82d76-70ea-41e2-9197-370581804d09", + "GroupMember.Read.All": "98830695-27a2-44f7-8c18-0c3ebc9698f6", + "GroupMember.ReadWrite.All": "dbaae8cf-10b5-4b86-a4a1-f871c94c6695", + "User.Read.All": "df021288-bdef-4463-88db-98f22de89214", + "User.ReadWrite.All": "741f803b-c850-494e-b5df-cde7c675a1ca", + "UserAuthenticationMethod.ReadWrite.All": "50483e42-d915-4231-9639-7fdb7fd190e5", + } + uuid_delegated: ClassVar[dict[str, str]] = { + "GroupMember.Read.All": "bc024368-1153-4739-b217-4326f2e966d0", + "User.Read.All": "a154be20-db9c-4678-8ab7-66f6cc099a59", + } + + def __init__( + self, + *, + credential: DeferredCredential, + disable_logging: bool = False, + ): + self.base_endpoint = "https://graph.microsoft.com/v1.0" + self.credential = credential + self.logger = get_null_logger() if disable_logging else get_logger() + + @classmethod + def from_scopes( + cls: type[Self], + *, + scopes: Sequence[str], + tenant_id: str, + disable_logging: bool = False, + ) -> "GraphApi": + return cls( + credential=GraphApiCredential( + scopes=scopes, tenant_id=tenant_id, skip_confirmation=disable_logging + ), + disable_logging=disable_logging, + ) + + @classmethod + def from_token( + cls: type[Self], auth_token: str, *, disable_logging: 
bool = False + ) -> "GraphApi": + """Construct a GraphApi from an existing authentication token.""" + try: + decoded = DeferredCredential.decode_token(auth_token) + return cls.from_scopes( + disable_logging=disable_logging, + scopes=str(decoded["scp"]).split(), + tenant_id=decoded["tid"], + ) + except DataSafeHavenValueError as exc: + msg = "Could not construct GraphApi from provided token." + raise DataSafeHavenValueError(msg) from exc + + @property + def token(self) -> str: + return self.credential.token + + def add_custom_domain(self, domain_name: str) -> str: + """Add Entra ID custom domain + + Returns: + str: Registration TXT record + + Raises: + DataSafeHavenMicrosoftGraphError if domain could not be added + """ + try: + # Create the Entra ID custom domain if it does not already exist + domains = self.read_domains() + if not any(domain["id"] == domain_name for domain in domains): + self.http_post( + f"{self.base_endpoint}/domains", + json={"id": domain_name}, + ) + # Get the DNS verification records for the custom domain + response = self.http_get( + f"{self.base_endpoint}/domains/{domain_name}/verificationDnsRecords" + ) + txt_records: list[str] = [ + record["text"] + for record in response.json()["value"] + if record["recordType"] == "Txt" + ] + if not txt_records: + msg = f"Could not retrieve verification DNS records for {domain_name}." + raise DataSafeHavenMicrosoftGraphError(msg) + return txt_records[0] + except Exception as exc: + msg = f"Could not register domain '{domain_name}'." + raise DataSafeHavenMicrosoftGraphError(msg) from exc + + def add_user_to_group( + self, + username: str, + group_name: str, + ) -> None: + """Add a user to a group + + Raises: + DataSafeHavenMicrosoftGraphError if the user could not be added to the group. + """ + try: + user_id = self.get_id_from_username(username) + group_id = self.get_id_from_groupname(group_name) + json_response = self.http_get( + f"{self.base_endpoint}/groups/{group_id}/members", + ).json() + # If user already belongs to group then do nothing further + if any(user_id == member["id"] for member in json_response["value"]): + self.logger.info( + f"User [green]'{username}'[/] is already a member of group [green]'{group_name}'[/]." + ) + # Otherwise add the user to the group + else: + request_json = { + "@odata.id": f"https://graph.microsoft.com/v1.0/directoryObjects/{user_id}" + } + self.http_post( + f"{self.base_endpoint}/groups/{group_id}/members/$ref", + json=request_json, + ) + self.logger.info( + f"Added user [green]'{username}'[/] to group [green]'{group_name}'[/]." + ) + except DataSafeHavenMicrosoftGraphError as exc: + msg = f"Could not add user '{username}' to group '{group_name}'." + raise DataSafeHavenMicrosoftGraphError(msg) from exc + + def create_application( + self, + application_name: str, + application_scopes: Sequence[str] = [], + delegated_scopes: Sequence[str] = [], + request_json: dict[str, Any] | None = None, + ) -> dict[str, Any]: + """Create an Entra application if it does not already exist + + Raises: + DataSafeHavenMicrosoftGraphError if the application could not be created + """ + try: + # Check for an existing application + json_response: dict[str, Any] + if existing_application := self.get_application_by_name(application_name): + self.logger.info( + f"Application '[green]{application_name}[/]' already exists." 
+ ) + json_response = existing_application + else: + # Create a new application + self.logger.debug( + f"Creating new application '[green]{application_name}[/]'...", + ) + if not request_json: + request_json = { + "displayName": application_name, + "signInAudience": "AzureADMyOrg", + "passwordCredentials": [], + "publicClient": { + "redirectUris": [ + "https://login.microsoftonline.com/common/oauth2/nativeclient", + "urn:ietf:wg:oauth:2.0:oob", + ] + }, + } + # Add scopes if there are any + scopes = [ + { + "id": self.uuid_application[application_scope], + "type": "Role", # 'Role' is the type for application permissions + } + for application_scope in application_scopes + ] + [ + { + "id": self.uuid_delegated[delegated_scope], + "type": "Scope", # 'Scope' is the type for delegated permissions + } + for delegated_scope in delegated_scopes + ] + if scopes: + request_json["requiredResourceAccess"] = [ + { + "resourceAppId": self.application_ids["Microsoft Graph"], + "resourceAccess": scopes, + } + ] + self.logger.debug("Making creation HTTP POST request.") + json_response = self.http_post( + f"{self.base_endpoint}/applications", + json=request_json, + ).json() + self.logger.info( + f"Created new application '[green]{json_response['displayName']}[/]'.", + ) + + # Ensure that the application service principal exists + self.ensure_application_service_principal(application_name) + + # Grant admin consent for the requested scopes + if application_scopes or delegated_scopes: + for scope in application_scopes: + self.grant_application_role_permissions(application_name, scope) + for scope in delegated_scopes: + self.grant_delegated_role_permissions(application_name, scope) + attempts = 0 + max_attempts = 5 + while attempts < max_attempts: + if application_sp := self.get_service_principal_by_name( + application_name + ): + if self.read_application_permissions(application_sp["id"]): + break + time.sleep(10) + attempts += 1 + + if attempts == max_attempts: + msg = "Maximum attempts to validate service principle permissions exceeded" + raise DataSafeHavenMicrosoftGraphError(msg) + + # Return JSON representation of the Entra application + return json_response + except Exception as exc: + msg = f"Could not create application '{application_name}'." + raise DataSafeHavenMicrosoftGraphError(msg) from exc + + def create_application_secret( + self, application_name: str, application_secret_name: str + ) -> str: + """Add a secret to an existing Entra application, overwriting any existing secret. + + Returns: + str: Contents of newly-created secret + + Raises: + DataSafeHavenMicrosoftGraphError if the secret could not be created + """ + try: + application_json = self.get_application_by_name(application_name) + if not application_json: + msg = f"Could not retrieve application '{application_name}'" + raise DataSafeHavenMicrosoftGraphError(msg) + # If the secret already exists then remove it + if "passwordCredentials" in application_json: + for secret in application_json["passwordCredentials"]: + if secret["displayName"] == application_secret_name: + self.logger.debug( + f"Removing pre-existing secret '{secret['displayName']}' from application '{application_name}'." 
+ ) + self.http_post( + f"{self.base_endpoint}/applications/{application_json['id']}/removePassword", + json={"keyId": secret["keyId"]}, + ) + # Create the application secret + self.logger.debug( + f"Creating application secret '[green]{application_secret_name}[/]'...", + ) + request_json = { + "passwordCredential": { + "displayName": application_secret_name, + "endDateTime": ( + datetime.datetime.now(datetime.UTC) + + datetime.timedelta(weeks=520) + ).strftime("%Y-%m-%dT%H:%M:%SZ"), + } + } + json_response = self.http_post( + f"{self.base_endpoint}/applications/{application_json['id']}/addPassword", + json=request_json, + ).json() + self.logger.debug( + f"Created application secret '[green]{application_secret_name}[/]'.", + ) + return str(json_response["secretText"]) + except Exception as exc: + msg = f"Could not create application secret '{application_secret_name}'." + raise DataSafeHavenMicrosoftGraphError(msg) from exc + + def create_group(self, group_name: str) -> None: + """Create an Entra group if it does not already exist + + Raises: + DataSafeHavenMicrosoftGraphError if the group could not be created + """ + try: + if self.get_id_from_groupname(group_name): + self.logger.info( + f"Found existing Entra group '[green]{group_name}[/]'.", + ) + return + self.logger.debug( + f"Creating Entra group '[green]{group_name}[/]'...", + ) + request_json = { + "description": group_name, + "displayName": group_name, + "groupTypes": [], + "mailEnabled": False, + "mailNickname": alphanumeric(group_name).lower(), + "securityEnabled": True, + } + self.http_post( + f"{self.base_endpoint}/groups", + json=request_json, + ).json() + self.logger.info( + f"Created Entra group '[green]{group_name}[/]'.", + ) + except Exception as exc: + msg = f"Could not create Entra group '{group_name}'." + raise DataSafeHavenMicrosoftGraphError(msg) from exc + + def ensure_application_service_principal( + self, application_name: str + ) -> dict[str, Any]: + """Create a service principal for an Entra application if it does not already exist + + Raises: + DataSafeHavenMicrosoftGraphError if the service principal could not be created + """ + try: + # Return existing service principal if there is one + application_sp = self.get_service_principal_by_name(application_name) + if not application_sp: + # Otherwise we need to try + self.logger.debug( + f"Creating service principal for application '[green]{application_name}[/]'...", + ) + application_json = self.get_application_by_name(application_name) + if not application_json: + msg = f"Could not retrieve application '{application_name}'" + raise DataSafeHavenMicrosoftGraphError(msg) + self.http_post( + f"{self.base_endpoint}/servicePrincipals", + json={"appId": application_json["appId"]}, + ).json() + self.logger.info( + f"Created service principal for application '[green]{application_name}[/]'.", + ) + application_sp = self.get_service_principal_by_name(application_name) + if not application_sp: + msg = f"service principal for application '[green]{application_name}[/]' not found." + raise DataSafeHavenMicrosoftGraphError(msg) + return application_sp + except Exception as exc: + msg = f"Could not create service principal for application '{application_name}'." 
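As a usage sketch only (the tenant ID, scope list and names below are illustrative placeholders, not values from this changeset), the methods above are typically used in sequence: obtain a GraphApi, create or fetch the application, then attach a secret and a group:

    from data_safe_haven.external.api.graph_api import GraphApi

    graph_api = GraphApi.from_scopes(
        scopes=["https://graph.microsoft.com/.default"],  # assumed scope
        tenant_id="00000000-0000-0000-0000-000000000000",  # placeholder tenant
    )
    application = graph_api.create_application(
        "example-application",
        application_scopes=["Application.ReadWrite.All"],
    )
    secret = graph_api.create_application_secret("example-application", "example-secret")
    graph_api.create_group("Example Users")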
+ raise DataSafeHavenMicrosoftGraphError(msg) from exc + + def create_user( + self, + request_json: dict[str, Any], + email_address: str, + phone_number: str, + ) -> None: + """Create an Entra user if it does not already exist + + Raises: + DataSafeHavenMicrosoftGraphError if the user could not be created + """ + username = request_json["mailNickname"] + final_verb = "create/update" + try: + # Check whether user already exists + user_id = self.get_id_from_username(username) + if user_id: + self.logger.debug( + f"Updating Entra user '[green]{username}[/]'...", + ) + final_verb = "Update" + else: + self.logger.debug( + f"Creating Entra user '[green]{username}[/]'...", + ) + final_verb = "Create" + # If they do not then create them + endpoint = f"{self.base_endpoint}/users" + json_response = self.http_post( + endpoint, + json=request_json, + ).json() + user_id = json_response["id"] + # Set the authentication email address + try: + response = self.http_get( + f"https://graph.microsoft.com/beta/users/{user_id}/authentication/emailMethods" + ) + if existing_email_addresses := [ + item["emailAddress"] for item in response.json()["value"] + ]: + self.logger.warning( + f"Email authentication is already set up for Entra user '[green]{username}[/]' using {existing_email_addresses}." + ) + else: + self.http_post( + f"https://graph.microsoft.com/beta/users/{user_id}/authentication/emailMethods", + json={"emailAddress": email_address}, + ) + except DataSafeHavenMicrosoftGraphError as exc: + msg = f"Failed to add authentication email address '{email_address}'." + raise DataSafeHavenMicrosoftGraphError(msg) from exc + + # Set the authentication phone number + try: + response = self.http_get( + f"https://graph.microsoft.com/beta/users/{user_id}/authentication/phoneMethods" + ) + if existing_phone_numbers := [ + item["phoneNumber"] for item in response.json()["value"] + ]: + self.logger.warning( + f"Phone authentication is already set up for Entra user '[green]{username}[/]' using {existing_phone_numbers}." + ) + else: + self.http_post( + f"https://graph.microsoft.com/beta/users/{user_id}/authentication/phoneMethods", + json={"phoneNumber": phone_number, "phoneType": "mobile"}, + ) + except DataSafeHavenMicrosoftGraphError as exc: + msg = f"Failed to add authentication phone number '{phone_number}'." + raise DataSafeHavenMicrosoftGraphError(msg) from exc + # Ensure user is enabled + self.http_patch( + f"{self.base_endpoint}/users/{user_id}", + json={"accountEnabled": True}, + ) + self.logger.info( + f"{final_verb}d Entra user '[green]{username}[/]'.", + ) + except DataSafeHavenMicrosoftGraphError as exc: + msg = f"Could not {final_verb.lower()} user {username}." + raise DataSafeHavenMicrosoftGraphError(msg) from exc + + def delete_application( + self, + application_name: str, + ) -> None: + """Remove an application from Entra ID + + Raises: + DataSafeHavenMicrosoftGraphError if the application could not be deleted + """ + try: + # Delete the application if it exists + if application := self.get_application_by_name(application_name): + self.logger.debug( + f"Deleting application '[green]{application_name}[/]'...", + ) + self.http_delete( + f"{self.base_endpoint}/applications/{application['id']}", + ) + self.logger.info( + f"Deleted application '[green]{application_name}[/]'.", + ) + except Exception as exc: + msg = f"Could not delete application '{application_name}'." 
+ raise DataSafeHavenMicrosoftGraphError(msg) from exc + + def get_application_by_name(self, application_name: str) -> dict[str, Any] | None: + try: + return next( + application + for application in self.read_applications() + if application["displayName"] == application_name + ) + except (DataSafeHavenMicrosoftGraphError, StopIteration): + return None + + def get_service_principal_by_name( + self, service_principal_name: str + ) -> dict[str, Any] | None: + try: + return next( + service_principal + for service_principal in self.read_service_principals() + if service_principal["displayName"] == service_principal_name + ) + except (DataSafeHavenMicrosoftGraphError, StopIteration): + return None + + def get_id_from_groupname(self, group_name: str) -> str | None: + try: + return str( + next( + group + for group in self.read_groups() + if group["displayName"] == group_name + )["id"] + ) + except (DataSafeHavenMicrosoftGraphError, StopIteration): + return None + + def get_id_from_username(self, username: str) -> str | None: + try: + return str( + next( + user + for user in self.read_users() + if user["userPrincipalName"].split("@")[0] == username + )["id"] + ) + except (DataSafeHavenMicrosoftGraphError, StopIteration): + return None + + def grant_role_permissions( + self, + application_name: str, + *, + application_role_assignments: Sequence[str], + delegated_role_assignments: Sequence[str], + ) -> None: + """ + Grant roles to the service principal associated with an application and give admin approval to these roles + + These can be either application or delegated roles. + + - Application roles allow the application to perform an action itself. + - Delegated roles allow the application to ask a user for permission to perform an action. + + See https://learn.microsoft.com/en-us/graph/permissions-grant-via-msgraph for more details. + + Raises: + DataSafeHavenMicrosoftGraphError if one or more roles could not be assigned. + """ + # Ensure that the application has a service principal + self.ensure_application_service_principal(application_name) + + # Grant any requested application role permissions + for role_name in application_role_assignments: + self.grant_application_role_permissions(application_name, role_name) + + # Grant any requested delegated role permissions + for role_name in delegated_role_assignments: + self.grant_delegated_role_permissions(application_name, role_name) + + def grant_application_role_permissions( + self, application_name: str, application_role_name: str + ) -> None: + """ + Assign a named application role to the service principal associated with an application. + Additionally provide Global Admin approval for the application to hold this role. + Application roles allow the application to perform an action itself. + + See https://learn.microsoft.com/en-us/graph/permissions-grant-via-msgraph for more details. + + Raises: + DataSafeHavenMicrosoftGraphError if one or more roles could not be assigned. + """ + try: + # Get service principals for Microsoft Graph and this application + microsoft_graph_sp = self.get_service_principal_by_name("Microsoft Graph") + if not microsoft_graph_sp: + msg = "Could not find Microsoft Graph service principal." + raise DataSafeHavenMicrosoftGraphError(msg) + application_sp = self.get_service_principal_by_name(application_name) + if not application_sp: + msg = f"Could not find application service principal for application {application_name}." 
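A minimal sketch of granting both permission kinds together via grant_role_permissions (the application name is illustrative; role names must be keys of uuid_application / uuid_delegated above):

    graph_api.grant_role_permissions(
        "example-application",
        application_role_assignments=["Group.ReadWrite.All", "User.ReadWrite.All"],
        delegated_role_assignments=["GroupMember.Read.All"],
    )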
+ raise DataSafeHavenMicrosoftGraphError(msg) + # Check whether permission is already granted + app_role_id = self.uuid_application[application_role_name] + response = self.http_get( + f"{self.base_endpoint}/servicePrincipals/{microsoft_graph_sp['id']}/appRoleAssignedTo", + ) + for application in response.json().get("value", []): + if (application["appRoleId"] == app_role_id) and ( + application["principalDisplayName"] == application_name + ): + self.logger.debug( + f"Application role '[green]{application_role_name}[/]' already assigned to '{application_name}'.", + ) + return + # Otherwise grant permissions for this role to the application + self.logger.debug( + f"Assigning application role '[green]{application_role_name}[/]' to '{application_name}'...", + ) + request_json = { + "principalId": application_sp["id"], + "resourceId": microsoft_graph_sp["id"], + "appRoleId": app_role_id, + } + self.http_post( + f"{self.base_endpoint}/servicePrincipals/{microsoft_graph_sp['id']}/appRoleAssignments", + json=request_json, + ) + self.logger.info( + f"Assigned application role '[green]{application_role_name}[/]' to '{application_name}'.", + ) + except Exception as exc: + msg = f"Could not assign application role '{application_role_name}' to application '{application_name}'." + raise DataSafeHavenMicrosoftGraphError(msg) from exc + + def grant_delegated_role_permissions( + self, application_name: str, application_role_name: str + ) -> None: + """ + Assign a named delegated role to the service principal associated with an application. + Additionally provide Global Admin approval for the application to hold this role. + Delegated roles allow the application to ask a user for permission to perform an action. + + See https://learn.microsoft.com/en-us/graph/permissions-grant-via-msgraph for more details. + + Raises: + DataSafeHavenMicrosoftGraphError if one or more roles could not be assigned. + """ + try: + # Get service principals for Microsoft Graph and this application + microsoft_graph_sp = self.get_service_principal_by_name("Microsoft Graph") + if not microsoft_graph_sp: + msg = "Could not find Microsoft Graph service principal." + raise DataSafeHavenMicrosoftGraphError(msg) + application_sp = self.get_service_principal_by_name(application_name) + if not application_sp: + msg = "Could not find application service principal." 
+ raise DataSafeHavenMicrosoftGraphError(msg) + # Check existing permissions + response = self.http_get(f"{self.base_endpoint}/oauth2PermissionGrants") + self.logger.debug( + f"Assigning delegated role '[green]{application_role_name}[/]' to '{application_name}'...", + ) + # If there are existing permissions then we need to patch + application = next( + ( + app + for app in response.json().get("value", []) + if app["clientId"] == application_sp["id"] + ), + None, + ) + if application: + request_json = { + "scope": f"{application['scope']} {application_role_name}" + } + response = self.http_patch( + f"{self.base_endpoint}/oauth2PermissionGrants/{application['id']}", + json=request_json, + ) + # Otherwise we need to make a new delegation request + else: + request_json = { + "clientId": application_sp["id"], + "consentType": "AllPrincipals", + "resourceId": microsoft_graph_sp["id"], + "scope": application_role_name, + } + response = self.http_post( + f"{self.base_endpoint}/oauth2PermissionGrants", + json=request_json, + ) + self.logger.info( + f"Assigned delegated role '[green]{application_role_name}[/]' to '{application_name}'.", + ) + except Exception as exc: + msg = f"Could not assign delegated role '{application_role_name}' to application '{application_name}'." + raise DataSafeHavenMicrosoftGraphError(msg) from exc + + @staticmethod + def http_raise_for_status(response: requests.Response) -> None: + """Check the status of a response + + Raises: + RequestException if the response did not succeed + """ + # We do not use response.ok as this allows 3xx codes + if requests.codes.OK <= response.status_code < requests.codes.MULTIPLE_CHOICES: + return + raise requests.exceptions.RequestException( + response=response, request=response.request + ) + + def http_delete(self, url: str, **kwargs: Any) -> requests.Response: + """Make an HTTP DELETE request + + Returns: + requests.Response: The response from the remote server + + Raises: + DataSafeHavenMicrosoftGraphError if the request failed + """ + try: + response = requests.delete( + url, + headers={"Authorization": f"Bearer {self.token}"}, + timeout=120, + **kwargs, + ) + self.http_raise_for_status(response) + return response + + except requests.exceptions.RequestException as exc: + msg = f"Could not execute DELETE request to '{url}'." + if exc.response: + msg += f" Response content received: '{exc.response.content.decode()}'." + raise DataSafeHavenMicrosoftGraphError(msg) from exc + + def http_get_single_page(self, url: str, **kwargs: Any) -> requests.Response: + """Make an HTTP GET request + + Returns: + requests.Response: The response from the remote server + + Raises: + DataSafeHavenMicrosoftGraphError if the request failed + """ + try: + response = requests.get( + url, + headers={"Authorization": f"Bearer {self.token}"}, + timeout=120, + **kwargs, + ) + self.http_raise_for_status(response) + return response + except requests.exceptions.RequestException as exc: + msg = f"Could not execute GET request to '{url}'." + if exc.response: + msg += f" Response content received: '{exc.response.content.decode()}'." 
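To illustrate the comment in http_raise_for_status: requests' Response.ok is true for any status code below 400, so a 302 redirect would pass it, whereas the helper above accepts 2xx codes only. A toy check, constructing a Response by hand purely for illustration:

    import requests

    response = requests.Response()
    response.status_code = 302
    assert response.ok  # ok allows redirects (anything below 400)
    assert not (
        requests.codes.OK <= response.status_code < requests.codes.MULTIPLE_CHOICES
    )  # the stricter 2xx-only test used above rejects it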
+ raise DataSafeHavenMicrosoftGraphError(msg) from exc + + def http_get(self, url: str, **kwargs: Any) -> requests.Response: + """Make a paged HTTP GET request and return all values + + Returns: + requests.Response: The response from the remote server, with all values combined + + Raises: + DataSafeHavenMicrosoftGraphError if the request failed + """ + try: + base_url = url + values = [] + + # Keep requesting new pages until there are no more + while True: + response = self.http_get_single_page(url, **kwargs) + values += response.json()["value"] + url = response.json().get("@odata.nextLink", None) + if not url: + break + + # Add previous response values into the content bytes + json_content = response.json() + json_content["value"] = values + response._content = json.dumps(json_content).encode("utf-8") + + # Return the full response + self.http_raise_for_status(response) + return response + except requests.exceptions.RequestException as exc: + msg = f"Could not execute GET request to '{base_url}'." + if exc.response: + msg += f" Response content received: '{exc.response.content.decode()}'." + msg += f" Token {self.token}." + raise DataSafeHavenMicrosoftGraphError(msg) from exc + + def http_patch(self, url: str, **kwargs: Any) -> requests.Response: + """Make an HTTP PATCH request + + Returns: + requests.Response: The response from the remote server + + Raises: + DataSafeHavenMicrosoftGraphError if the request failed + """ + try: + response = requests.patch( + url, + headers={"Authorization": f"Bearer {self.token}"}, + timeout=120, + **kwargs, + ) + self.http_raise_for_status(response) + return response + except requests.exceptions.RequestException as exc: + msg = f"Could not execute PATCH request to '{url}'." + if exc.response: + msg += f" Response content received: '{exc.response.content.decode()}'." + raise DataSafeHavenMicrosoftGraphError(msg) from exc + + def http_post(self, url: str, **kwargs: Any) -> requests.Response: + """Make an HTTP POST request + + Returns: + requests.Response: The response from the remote server + + Raises: + DataSafeHavenMicrosoftGraphError if the request failed + """ + try: + response = requests.post( + url, + headers={"Authorization": f"Bearer {self.token}"}, + timeout=120, + **kwargs, + ) + self.http_raise_for_status(response) + + # Wait for operation to complete before returning + time.sleep(30) + return response + except requests.exceptions.RequestException as exc: + msg = f"Could not execute POST request to '{url}'." + if exc.response: + msg += f" Response content received: '{exc.response.content.decode()}'." + raise DataSafeHavenMicrosoftGraphError(msg) from exc + + def read_applications(self) -> Sequence[dict[str, Any]]: + """Get list of applications + + Returns: + JSON: A JSON list of applications + + Raises: + DataSafeHavenMicrosoftGraphError if applications could not be loaded + """ + try: + return [ + dict(obj) + for obj in self.http_get(f"{self.base_endpoint}/applications").json()[ + "value" + ] + ] + except Exception as exc: + msg = "Could not load list of applications." 
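The paging loop in http_get relies on the Microsoft Graph / OData convention that each page carries a "value" array plus an optional "@odata.nextLink" URL for the next page. A standalone sketch of that merging logic with made-up page contents:

    pages = [
        {"value": [{"id": "1"}, {"id": "2"}], "@odata.nextLink": "https://example.invalid/page2"},
        {"value": [{"id": "3"}]},  # no nextLink, so this is the final page
    ]

    values = []
    for page in pages:
        values += page["value"]
        if "@odata.nextLink" not in page:
            break
    # values now contains all three items, mirroring the combined "value" field
    # that http_get writes back into the returned response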
+ raise DataSafeHavenMicrosoftGraphError(msg) from exc + + def read_application_permissions( + self, application_service_principal_id: str + ) -> Sequence[dict[str, Any]]: + """Get list of application permissions + + Returns: + JSON: A JSON list of application permissions + + Raises: + DataSafeHavenMicrosoftGraphError if application permissions could not be loaded + """ + try: + delegated = self.http_get( + f"{self.base_endpoint}/servicePrincipals/{application_service_principal_id}/oauth2PermissionGrants", + ).json()["value"] + application = self.http_get( + f"{self.base_endpoint}/servicePrincipals/{application_service_principal_id}/appRoleAssignments", + ).json()["value"] + return [dict(obj) for obj in (delegated + application)] + except Exception as exc: + msg = "Could not load list of application permissions." + raise DataSafeHavenMicrosoftGraphError(msg) from exc + + def read_domains(self) -> Sequence[dict[str, Any]]: + """Get details of Entra domains + + Returns: + JSON: A JSON list of Entra domains + + Raises: + DataSafeHavenMicrosoftGraphError if domains could not be loaded + """ + try: + json_response = self.http_get(f"{self.base_endpoint}/domains").json() + return [dict(obj) for obj in json_response["value"]] + except Exception as exc: + msg = "Could not load list of domains." + raise DataSafeHavenMicrosoftGraphError(msg) from exc + + def read_groups( + self, + attributes: Sequence[str] | None = None, + ) -> Sequence[dict[str, Any]]: + """Get details of Entra groups + + Returns: + JSON: A JSON list of Entra ID groups + + Raises: + DataSafeHavenMicrosoftGraphError if groups could not be loaded + """ + try: + endpoint = f"{self.base_endpoint}/groups" + if attributes: + endpoint += f"?$select={','.join(attributes)}" + return [dict(obj) for obj in self.http_get(endpoint).json()["value"]] + except Exception as exc: + msg = "Could not load list of groups." + raise DataSafeHavenMicrosoftGraphError(msg) from exc + + def read_service_principals(self) -> Sequence[dict[str, Any]]: + """Get list of service principals""" + try: + return [ + dict(obj) + for obj in self.http_get( + f"{self.base_endpoint}/servicePrincipals" + ).json()["value"] + ] + except Exception as exc: + msg = "Could not load list of service principals." + raise DataSafeHavenMicrosoftGraphError(msg) from exc + + def read_users( + self, attributes: Sequence[str] | None = None + ) -> Sequence[dict[str, Any]]: + """Get details of Entra users + + Returns: + JSON: A JSON list of Entra users + + Raises: + DataSafeHavenMicrosoftGraphError if users could not be loaded + """ + attributes = ( + attributes + if attributes + else [ + "accountEnabled", + "businessPhones", + "displayName", + "givenName", + "id", + "mail", + "mailNickname", + "mobilePhone", + "onPremisesSamAccountName", + "onPremisesSyncEnabled", + "userPrincipalName", + "surname", + "telephoneNumber", + "userPrincipalName", + ] + ) + users: Sequence[dict[str, Any]] + try: + endpoint = f"{self.base_endpoint}/users" + if attributes: + endpoint += f"?$select={','.join(attributes)}" + users = self.http_get(endpoint).json()["value"] + administrators = self.http_get( + f"{self.base_endpoint}/directoryRoles/roleTemplateId=" + f"{self.role_template_ids['Global Administrator']}/members" + ).json()["value"] + for user in users: + user["isGlobalAdmin"] = any( + user["id"] == admin["id"] for admin in administrators + ) + return users + except Exception as exc: + msg = "Could not load list of users." 
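For example, restricting the attributes returned by read_groups just appends an OData $select clause to the endpoint (the attribute names shown are illustrative):

    groups = graph_api.read_groups(attributes=["displayName", "id"])
    # performs GET https://graph.microsoft.com/v1.0/groups?$select=displayName,id
    for group in groups:
        print(group["displayName"], group["id"])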
+        raise DataSafeHavenMicrosoftGraphError(msg) from exc
+
+    def remove_user(
+        self,
+        username: str,
+    ) -> None:
+        """Remove a user from Entra ID
+
+        Raises:
+            DataSafeHavenMicrosoftGraphError if the user could not be removed
+        """
+        try:
+            user_id = self.get_id_from_username(username)
+            # Attempt to remove the user from Entra ID
+            self.http_delete(
+                f"{self.base_endpoint}/users/{user_id}",
+            )
+            return
+        except Exception as exc:
+            msg = f"Could not remove user '{username}'."
+            raise DataSafeHavenMicrosoftGraphError(msg) from exc
+
+    def remove_user_from_group(
+        self,
+        username: str,
+        group_name: str,
+    ) -> None:
+        """Remove a user from an Entra group
+
+        Raises:
+            DataSafeHavenMicrosoftGraphError if the user could not be removed from the group
+        """
+        try:
+            user_id = self.get_id_from_username(username)
+            group_id = self.get_id_from_groupname(group_name)
+            # Check whether user is in group
+            json_response = self.http_get(
+                f"{self.base_endpoint}/groups/{group_id}/members",
+            ).json()
+            # Remove user from group if it is a member
+            if user_id in (
+                group_member["id"] for group_member in json_response["value"]
+            ):
+                self.http_delete(
+                    f"{self.base_endpoint}/groups/{group_id}/members/{user_id}/$ref",
+                )
+                self.logger.info(
+                    f"Removed [green]'{username}'[/] from group [green]'{group_name}'[/]."
+                )
+            else:
+                self.logger.info(
+                    f"User [green]'{username}'[/] does not belong to group [green]'{group_name}'[/]."
+                )
+        except Exception as exc:
+            msg = f"Could not remove user '{username}' from group '{group_name}'."
+            raise DataSafeHavenMicrosoftGraphError(msg) from exc
+
+    def verify_custom_domain(
+        self, domain_name: str, expected_nameservers: Sequence[str]
+    ) -> None:
+        """Verify Entra custom domain
+
+        Raises:
+            DataSafeHavenMicrosoftGraphError if domain could not be verified
+        """
+        try:
+            # Check that the Entra custom domain has already been added
+            domains = self.read_domains()
+            if not any(d["id"] == domain_name for d in domains):
+                msg = f"Domain {domain_name} has not been added to Entra ID."
+                raise DataSafeHavenMicrosoftGraphError(msg)
+            # Wait until domain delegation is complete
+            while True:
+                # Check whether all expected nameservers are active
+                with suppress(resolver.NXDOMAIN):
+                    self.logger.debug(
+                        f"Checking [green]{domain_name}[/] domain verification status ..."
+                    )
+                    active_nameservers = [
+                        str(ns) for ns in iter(resolver.resolve(domain_name, "NS"))
+                    ]
+                    if all(
+                        any(nameserver in n for n in active_nameservers)
+                        for nameserver in expected_nameservers
+                    ):
+                        self.logger.info(
+                            f"Verified that domain [green]{domain_name}[/] is delegated to Azure."
+                        )
+                        break
+                self.logger.warning(
+                    f"Domain [green]{domain_name}[/] is not currently delegated to Azure."
+ ) + # Prompt user to set domain delegation manually + docs_link = "https://learn.microsoft.com/en-us/azure/dns/dns-delegate-domain-azure-dns#delegate-the-domain" + self.logger.info( + f"To proceed you will need to delegate [green]{domain_name}[/] to Azure ({docs_link})" + ) + ns_list = ", ".join([f"[green]{n}[/]" for n in expected_nameservers]) + self.logger.info( + f"You will need to create an NS record pointing to: {ns_list}" + ) + if not console.confirm( + f"Are you ready to check whether [green]{domain_name}[/] has been delegated to Azure?", + default_to_yes=True, + ): + self.logger.error( + "Please use `az login` to connect to the correct Azure CLI account" + ) + raise typer.Exit(1) + # Send verification request if needed + if not any((d["id"] == domain_name and d["isVerified"]) for d in domains): + response = self.http_post( + f"{self.base_endpoint}/domains/{domain_name}/verify" + ) + if not response.json()["isVerified"]: + raise DataSafeHavenMicrosoftGraphError(response.content) + except Exception as exc: + msg = f"Could not verify domain '{domain_name}'." + raise DataSafeHavenMicrosoftGraphError(msg) from exc diff --git a/data_safe_haven/external/interface/__init__.py b/data_safe_haven/external/interface/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/data_safe_haven/external/interface/azure_container_instance.py b/data_safe_haven/external/interface/azure_container_instance.py new file mode 100644 index 0000000000..7d37a87760 --- /dev/null +++ b/data_safe_haven/external/interface/azure_container_instance.py @@ -0,0 +1,125 @@ +import contextlib +import time + +import websocket +from azure.core.polling import LROPoller +from azure.mgmt.containerinstance import ContainerInstanceManagementClient +from azure.mgmt.containerinstance.models import ( + ContainerExecRequest, + ContainerExecRequestTerminalSize, +) + +from data_safe_haven.exceptions import DataSafeHavenAzureError +from data_safe_haven.external import AzureSdk +from data_safe_haven.logging import get_logger + + +class AzureContainerInstance: + """Interface for Azure container instances.""" + + def __init__( + self, + container_group_name: str, + resource_group_name: str, + subscription_name: str, + ): + self.azure_sdk = AzureSdk(subscription_name) + self.logger = get_logger() + self.resource_group_name = resource_group_name + self.container_group_name = container_group_name + + @staticmethod + def wait(poller: LROPoller[None]) -> None: + while not poller.done(): + time.sleep(10) + + @property + def current_ip_address(self) -> str: + aci_client = ContainerInstanceManagementClient( + self.azure_sdk.credential(), self.azure_sdk.subscription_id + ) + ip_address = aci_client.container_groups.get( + self.resource_group_name, self.container_group_name + ).ip_address + if ip_address and isinstance(ip_address.ip, str): + return ip_address.ip + msg = f"Could not determine IP address for container group {self.container_group_name}." 
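The delegation test in verify_custom_domain above reduces to resolving the domain's NS records and confirming that every expected Azure nameserver appears among them. A standalone sketch with placeholder values:

    from dns import resolver

    domain_name = "example.org"  # placeholder
    expected_nameservers = ["ns1-01.azure-dns.com", "ns2-01.azure-dns.net"]  # placeholders

    active_nameservers = [str(ns) for ns in resolver.resolve(domain_name, "NS")]
    delegated = all(
        any(nameserver in active for active in active_nameservers)
        for nameserver in expected_nameservers
    )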
+ raise DataSafeHavenAzureError(msg) + + def restart(self, target_ip_address: str | None = None) -> None: + """Restart the container group""" + # Connect to Azure clients + try: + aci_client = ContainerInstanceManagementClient( + self.azure_sdk.credential(), self.azure_sdk.subscription_id + ) + if not target_ip_address: + target_ip_address = self.current_ip_address + + # Restart container group + self.logger.debug( + f"Restarting container group [green]{self.container_group_name}[/]" + f" with IP address [green]{target_ip_address}[/]...", + ) + while True: + if ( + aci_client.container_groups.get( + self.resource_group_name, self.container_group_name + ).provisioning_state + == "Succeeded" + ): + self.wait( + aci_client.container_groups.begin_restart( + self.resource_group_name, self.container_group_name + ) + ) + else: + self.wait( + aci_client.container_groups.begin_start( + self.resource_group_name, self.container_group_name + ) + ) + if self.current_ip_address == target_ip_address: + break + self.logger.info( + f"Restarted container group [green]{self.container_group_name}[/]" + f" with IP address [green]{self.current_ip_address}[/].", + ) + except Exception as exc: + msg = f"Could not restart container group {self.container_group_name}." + raise DataSafeHavenAzureError(msg) from exc + + def run_executable(self, container_name: str, executable_path: str) -> list[str]: + """ + Run a script or command on one of the containers. + + It is possible to provide arguments to the command if needed. + The most likely use-case is running a script already present in the container. + """ + # Connect to Azure clients + aci_client = ContainerInstanceManagementClient( + self.azure_sdk.credential(), self.azure_sdk.subscription_id + ) + + # Run command + cnxn = aci_client.containers.execute_command( + self.resource_group_name, + self.container_group_name, + container_name, + ContainerExecRequest( + command=executable_path, + terminal_size=ContainerExecRequestTerminalSize(cols=80, rows=500), + ), + ) + + # Get command output via websocket + socket = websocket.create_connection(cnxn.web_socket_uri) + if cnxn.password: + socket.send(cnxn.password) + output = [] + with contextlib.suppress(websocket.WebSocketConnectionClosedException): + while result := socket.recv(): + for line in [line.strip() for line in result.splitlines()]: + output.append(line) + socket.close() + return output diff --git a/data_safe_haven/external/interface/azure_ipv4_range.py b/data_safe_haven/external/interface/azure_ipv4_range.py new file mode 100644 index 0000000000..fdebce774f --- /dev/null +++ b/data_safe_haven/external/interface/azure_ipv4_range.py @@ -0,0 +1,59 @@ +import ipaddress +import math +from contextlib import suppress + +from data_safe_haven.exceptions import DataSafeHavenIPRangeError + + +class AzureIPv4Range(ipaddress.IPv4Network): + """Azure-aware IPv4 address range""" + + def __init__( + self, + ip_address_first: str | ipaddress.IPv4Address, + ip_address_last: str | ipaddress.IPv4Address, + ): + networks = list( + ipaddress.summarize_address_range( + ipaddress.ip_address(ip_address_first), + ipaddress.ip_address(ip_address_last), + ) + ) + if len(networks) != 1: + msg = f"{ip_address_first}-{ip_address_last} cannot be expressed as a single network range." 
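A usage sketch for the exec helper above (the group, container and script names are placeholders):

    instance = AzureContainerInstance(
        container_group_name="example-group",
        resource_group_name="example-rg",
        subscription_name="Example Subscription",
    )
    for line in instance.run_executable("example-container", "/opt/scripts/example.sh"):
        print(line)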
+ raise DataSafeHavenIPRangeError(msg) + super().__init__(networks[0]) + self._subnets: list[AzureIPv4Range] = [] + + @classmethod + def from_cidr(cls, ip_cidr: str) -> "AzureIPv4Range": + network = ipaddress.IPv4Network(ip_cidr) + return cls(network[0], network[-1]) + + @property + def prefix(self) -> str: + return str(self) + + def all_ips(self) -> list[ipaddress.IPv4Address]: + """All IP addresses in the range""" + return list(self.hosts()) + + def available(self) -> list[ipaddress.IPv4Address]: + """Azure reserves x.x.x.1 for the default gateway and (x.x.x.2, x.x.x.3) to map Azure DNS IPs.""" + return list(self.all_ips())[3:] + + def next_subnet(self, number_of_addresses: int) -> "AzureIPv4Range": + """Find the next unused subnet of a given size""" + if not math.log2(number_of_addresses).is_integer(): + msg = f"Number of address '{number_of_addresses}' must be a power of 2" + raise DataSafeHavenIPRangeError(msg) + ip_address_first = self[0] + while True: + ip_address_last = ip_address_first + int(number_of_addresses - 1) + with suppress(DataSafeHavenIPRangeError): + candidate = AzureIPv4Range(ip_address_first, ip_address_last) + if not any(subnet.overlaps(candidate) for subnet in self._subnets): + self._subnets.append(candidate) + break + ip_address_first = ip_address_first + number_of_addresses + return candidate diff --git a/data_safe_haven/external/interface/azure_postgresql_database.py b/data_safe_haven/external/interface/azure_postgresql_database.py new file mode 100644 index 0000000000..312acaa11b --- /dev/null +++ b/data_safe_haven/external/interface/azure_postgresql_database.py @@ -0,0 +1,231 @@ +import datetime +import pathlib +import time +from collections.abc import Sequence +from typing import Any, cast + +import psycopg +from azure.core.polling import LROPoller +from azure.mgmt.rdbms.postgresql_flexibleservers import PostgreSQLManagementClient +from azure.mgmt.rdbms.postgresql_flexibleservers.models import FirewallRule, Server + +from data_safe_haven.exceptions import DataSafeHavenAzureError, DataSafeHavenValueError +from data_safe_haven.external import AzureSdk +from data_safe_haven.functions import current_ip_address +from data_safe_haven.logging import get_logger +from data_safe_haven.types import PathType +from data_safe_haven.utility import FileReader + + +class AzurePostgreSQLDatabase: + """Interface for Azure PostgreSQL databases.""" + + current_ip: str + db_client_: PostgreSQLManagementClient | None + db_name: str + db_server_: Server | None + db_server_admin_password: str + resource_group_name: str + server_name: str + rule_suffix: str + + def __init__( + self, + database_name: str, + database_server_admin_password: str, + database_server_name: str, + resource_group_name: str, + subscription_name: str, + ) -> None: + self.azure_sdk = AzureSdk(subscription_name) + self.current_ip = current_ip_address() + self.db_client_ = None + self.db_name = database_name + self.db_server_ = None + self.db_server_admin_password = database_server_admin_password + self.logger = get_logger() + self.port = 5432 + self.resource_group_name = resource_group_name + self.server_name = database_server_name + self.rule_suffix = datetime.datetime.now(tz=datetime.UTC).strftime( + r"%Y%m%d-%H%M%S" + ) + + @staticmethod + def wait(poller: LROPoller[Any]) -> None: + """Wait for a polling operation to finish.""" + while not poller.done(): + time.sleep(10) + + @property + def connection_string(self) -> str: + return " ".join( + [ + f"dbname={self.db_name}", + 
f"host={self.db_server.fully_qualified_domain_name}", + f"password={self.db_server_admin_password}", + f"port={self.port}", + f"user={self.db_server.administrator_login}", + "sslmode=require", + ] + ) + + @property + def db_client(self) -> PostgreSQLManagementClient: + """Get the database client.""" + if not self.db_client_: + self.db_client_ = PostgreSQLManagementClient( + self.azure_sdk.credential(), self.azure_sdk.subscription_id + ) + return self.db_client_ + + @property + def db_server(self) -> Server: + """Get the database server.""" + # self.logger.debug(f"Connecting to database using {self.connection_string}") + if not self.db_server_: + self.db_server_ = self.db_client.servers.get( + self.resource_group_name, self.server_name + ) + return self.db_server_ + + def db_connection(self, n_retries: int = 0) -> psycopg.Connection: + """Get the database connection.""" + while True: + try: + try: + connection = psycopg.connect(self.connection_string) + break + except psycopg.OperationalError as exc: + if n_retries <= 0: + raise exc + n_retries -= 1 + time.sleep(10) + except Exception as exc: + msg = "Could not connect to database." + raise DataSafeHavenAzureError(msg) from exc + return connection + + def load_sql( + self, filepath: PathType, mustache_values: dict[str, str] | None = None + ) -> str: + """Load filepath into a single SQL string.""" + reader = FileReader(filepath) + # Strip any comment lines + sql_lines = [ + line.split("--")[0] + for line in reader.file_contents(mustache_values).split("\n") + ] + # Join into a single SQL string + return " ".join([line for line in sql_lines if line]) + + def execute_scripts( + self, + filepaths: Sequence[PathType], + mustache_values: dict[str, Any] | None = None, + ) -> list[list[str]]: + """Execute scripts on the PostgreSQL server.""" + outputs: list[list[str]] = [] + connection: psycopg.Connection | None = None + cursor = None + + try: + # Add temporary firewall rule + self.set_database_access("enabled") + + # Connect to the database and get a cursor to perform database operations + connection = self.db_connection(n_retries=1) + cursor = connection.cursor() + + # Apply the Guacamole initialisation script + for filepath in filepaths: + _filepath = pathlib.Path(filepath) + self.logger.info(f"Running SQL script: [green]{_filepath.name}[/].") + commands = self.load_sql(_filepath, mustache_values) + cursor.execute(query=commands.encode()) + if cursor.statusmessage and "SELECT" in cursor.statusmessage: + outputs += [[str(msg) for msg in msg_tuple] for msg_tuple in cursor] + + # Commit changes + connection.commit() + self.logger.debug(f"Finished running {len(filepaths)} SQL scripts.") + except (Exception, psycopg.Error) as exc: + msg = "Error while connecting to PostgreSQL." + raise DataSafeHavenAzureError(msg) from exc + finally: + # Close the connection if it is open + if connection: + if cursor: + cursor.close() + connection.close() + # Remove temporary firewall rules + self.set_database_access("disabled") + return outputs + + def set_database_access(self, action: str) -> None: + """Enable/disable database access to the PostgreSQL server.""" + if action == "enabled": + self.logger.debug( + f"Adding temporary firewall rule for [green]{self.current_ip}[/]...", + ) + # NB. 
We would like to enable public_network_access at this point but this + # is not currently supported by the flexibleServer API + self.wait( + self.db_client.firewall_rules.begin_create_or_update( + self.resource_group_name, + self.server_name, + f"AllowConfigurationUpdate-{self.rule_suffix}", + FirewallRule( + start_ip_address=self.current_ip, end_ip_address=self.current_ip + ), + ) + ) + self.db_connection(n_retries=5) + self.logger.debug( + f"Added temporary firewall rule for [green]{self.current_ip}[/].", + ) + elif action == "disabled": + self.logger.debug( + f"Removing all firewall rule(s) from [green]{self.server_name}[/]...", + ) + rules = [ + # N.B. `list_by_server` returns FirewallRule, not FirewallRuleResult as + # its typehint currently suggests - we cast to the correct type. + cast(FirewallRule, rule) + for rule in self.db_client.firewall_rules.list_by_server( + self.resource_group_name, self.server_name + ) + ] + + # Delete all named firewall rules + rule_names = [str(rule.name) for rule in rules if rule.name] + for rule_name in rule_names: + self.wait( + self.db_client.firewall_rules.begin_delete( + self.resource_group_name, self.server_name, rule_name + ) + ) + + # NB. We would like to disable public_network_access at this point but this + # is not currently supported by the flexibleServer API + if len(rule_names) == len(rules): + self.logger.debug( + f"Removed all firewall rule(s) from [green]{self.server_name}[/].", + ) + else: + self.logger.warning( + f"Unable to remove all firewall rule(s) from [green]{self.server_name}[/].", + ) + else: + msg = f"Database access action {action} was not recognised." + raise DataSafeHavenValueError(msg) + self.db_server_ = None # Force refresh of self.db_server + public_network_access = ( + self.db_server.network.public_network_access + if self.db_server.network + else "UNKNOWN" + ) + self.logger.debug( + f"Public network access to [green]{self.server_name}[/]" + f" is [green]{public_network_access}[/]." 
+ ) diff --git a/data_safe_haven/external/interface/pulumi_account.py b/data_safe_haven/external/interface/pulumi_account.py new file mode 100644 index 0000000000..4db66c52c8 --- /dev/null +++ b/data_safe_haven/external/interface/pulumi_account.py @@ -0,0 +1,43 @@ +"""Manage Pulumi accounts""" + +from shutil import which +from typing import Any + +from data_safe_haven.exceptions import DataSafeHavenPulumiError +from data_safe_haven.external import AzureSdk + + +class PulumiAccount: + """Manage and interact with Pulumi backend account""" + + def __init__( + self, + resource_group_name: str, + storage_account_name: str, + subscription_name: str, + ): + self.resource_group_name = resource_group_name + self.storage_account_name = storage_account_name + self.subscription_name = subscription_name + self._env: dict[str, Any] | None = None + + # Ensure that Pulumi executable can be found + if which("pulumi") is None: + msg = "Unable to find Pulumi CLI executable in your path.\nPlease ensure that Pulumi is installed" + raise DataSafeHavenPulumiError(msg) + + @property + def env(self) -> dict[str, Any]: + """Get necessary Pulumi environment variables""" + if not self._env: + azure_sdk = AzureSdk(self.subscription_name) + storage_account_keys = azure_sdk.get_storage_account_keys( + self.resource_group_name, + self.storage_account_name, + ) + self._env = { + "AZURE_STORAGE_ACCOUNT": self.storage_account_name, + "AZURE_STORAGE_KEY": str(storage_account_keys[0].value), + "AZURE_KEYVAULT_AUTH_VIA_CLI": "true", + } + return self._env diff --git a/data_safe_haven/functions/__init__.py b/data_safe_haven/functions/__init__.py new file mode 100644 index 0000000000..e11b326135 --- /dev/null +++ b/data_safe_haven/functions/__init__.py @@ -0,0 +1,28 @@ +from .network import current_ip_address, ip_address_in_list +from .strings import ( + alphanumeric, + b64encode, + get_key_vault_name, + json_safe, + next_occurrence, + password, + replace_separators, + seeded_uuid, + sha256hash, + truncate_tokens, +) + +__all__ = [ + "alphanumeric", + "b64encode", + "current_ip_address", + "get_key_vault_name", + "ip_address_in_list", + "json_safe", + "next_occurrence", + "password", + "replace_separators", + "seeded_uuid", + "sha256hash", + "truncate_tokens", +] diff --git a/data_safe_haven/functions/network.py b/data_safe_haven/functions/network.py new file mode 100644 index 0000000000..70847e94dc --- /dev/null +++ b/data_safe_haven/functions/network.py @@ -0,0 +1,42 @@ +import ipaddress +from collections.abc import Sequence + +import requests + +from data_safe_haven.exceptions import DataSafeHavenValueError + + +def current_ip_address() -> str: + """ + Get the IP address of the current device. + + Returns: + str: the IP address + + Raises: + DataSafeHavenValueError: if the current IP address could not be determined + """ + try: + response = requests.get("https://api.ipify.org", timeout=300) + response.raise_for_status() + return response.content.decode("utf8") + except requests.RequestException as exc: + msg = "Could not determine IP address." 
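A sketch of how the Pulumi backend environment might be consumed, for instance when spawning the Pulumi CLI (all names are placeholders):

    import os
    import subprocess

    account = PulumiAccount(
        resource_group_name="example-rg",
        storage_account_name="examplestorage",
        subscription_name="Example Subscription",
    )
    env = {**os.environ, **account.env}
    subprocess.run(["pulumi", "whoami"], env=env, check=True)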
+ raise DataSafeHavenValueError(msg) from exc + + +def ip_address_in_list(ip_address_list: Sequence[str]) -> bool: + """ + Check whether current IP address belongs to a list of authorised addresses + + Returns: + bool: True if in list, False if not + + Raises: + DataSafeHavenValueError: if the current IP address could not be determined + """ + current_cidr = ipaddress.IPv4Network(current_ip_address()) + return any( + ipaddress.IPv4Network(authorised_cidr).supernet_of(current_cidr) + for authorised_cidr in ip_address_list + ) diff --git a/data_safe_haven/functions/strings.py b/data_safe_haven/functions/strings.py new file mode 100644 index 0000000000..0d5b06b33e --- /dev/null +++ b/data_safe_haven/functions/strings.py @@ -0,0 +1,132 @@ +import base64 +import datetime +import hashlib +import random +import secrets +import string +import uuid +from collections.abc import Sequence + +import pytz + +from data_safe_haven.exceptions import DataSafeHavenValueError + + +def alphanumeric(input_string: str) -> str: + """Strip any characters that are not letters or numbers from a string.""" + return "".join(filter(str.isalnum, input_string)) + + +def b64encode(input_string: str) -> str: + """Encode a normal string into a Base64 string.""" + return base64.b64encode(input_string.encode("utf-8")).decode() + + +def get_key_vault_name(stack_name: str) -> str: + """Key Vault names have a maximum of 24 characters""" + return f"{''.join(truncate_tokens(stack_name.split('-'), 17))}secrets" + + +def json_safe(input_string: str) -> str: + """Construct a JSON-safe version of an input string""" + return alphanumeric(input_string).lower() + + +def next_occurrence( + hour: int, minute: int, timezone: str, *, time_format: str = "iso" +) -> str: + """ + Get an ISO-formatted string representing the next occurence in UTC of a daily + repeating time in the local timezone. + + Args: + hour: hour in the local timezone + minute: minute in the local timezone + timezone: string representation of the local timezone + time_format: either 'iso' (YYYY-MM-DDTHH:MM:SS.mmmmmm) or 'iso_minute' (YYYY-MM-DD HH:MM) + """ + try: + local_tz = pytz.timezone(timezone) + local_dt = datetime.datetime.now(local_tz).replace( + hour=hour, + minute=minute, + second=0, + microsecond=0, + ) + utc_dt = local_dt.astimezone(pytz.utc) + # Add one day until this datetime is at least 1 hour in the future. + # This ensures that any Azure functions which depend on this datetime being in + # the future should treat it as valid. + utc_near_future = datetime.datetime.now(pytz.utc) + datetime.timedelta(hours=1) + while utc_dt < utc_near_future: + utc_dt += datetime.timedelta(days=1) + if time_format == "iso": + return utc_dt.isoformat() + elif time_format == "iso_minute": + return utc_dt.strftime(r"%Y-%m-%d %H:%M") + else: + msg = f"Time format '{time_format}' was not recognised." + raise DataSafeHavenValueError(msg) + except pytz.exceptions.UnknownTimeZoneError as exc: + msg = f"Timezone '{timezone}' was not recognised." + raise DataSafeHavenValueError(msg) from exc + except ValueError as exc: + msg = f"Time '{hour}:{minute}' was not recognised." + raise DataSafeHavenValueError(msg) from exc + + +def password(length: int) -> str: + """ + Generate a string of 'length' random alphanumeric characters. + Require at least one lower-case, one upper-case and one digit. 
+ """ + alphabet = string.ascii_letters + string.digits + while True: + password_ = "".join(secrets.choice(alphabet) for _ in range(length)) + if ( + any(c.islower() for c in password_) + and any(c.isupper() for c in password_) + and any(c.isdigit() for c in password_) + ): + break + return password_ + + +def replace_separators(input_string: str, separator: str = "") -> str: + """Return a string replacing all instances of [ _-.] with the desired separator.""" + return ( + input_string.replace(" ", separator) + .replace("_", separator) + .replace("-", separator) + .replace(".", separator) + ) + + +def seeded_uuid(seed: str) -> uuid.UUID: + """Return a UUID seeded from a given string.""" + generator = random.Random() # noqa: S311 + generator.seed(seed) + return uuid.UUID(int=generator.getrandbits(128), version=4) + + +def sha256hash(input_string: str) -> str: + """Return the SHA256 hash of a string as a string.""" + return hashlib.sha256(input_string.encode("utf-8")).hexdigest() + + +def truncate_tokens(tokens: Sequence[str], max_length: int) -> list[str]: + """ + Recursively remove the final character from the longest strings in the input. + Terminate when the total length of all strings is no greater than max_length. + For example: + truncate_tokens(["the", "quick", "fox"], 6) -> ["th", "qu", "fo"] + """ + output_tokens = list(tokens) + token_lengths = [len(t) for t in output_tokens] + while sum(token_lengths) > max_length: + for idx in range(len(output_tokens)): + if len(output_tokens[idx]) == max(token_lengths): + output_tokens[idx] = output_tokens[idx][:-1] + token_lengths[idx] -= 1 + break + return output_tokens diff --git a/data_safe_haven/infrastructure/__init__.py b/data_safe_haven/infrastructure/__init__.py new file mode 100644 index 0000000000..fd00f8ca11 --- /dev/null +++ b/data_safe_haven/infrastructure/__init__.py @@ -0,0 +1,7 @@ +from .programs import ImperativeSHM +from .project_manager import SREProjectManager + +__all__ = [ + "ImperativeSHM", + "SREProjectManager", +] diff --git a/data_safe_haven/infrastructure/common/__init__.py b/data_safe_haven/infrastructure/common/__init__.py new file mode 100644 index 0000000000..6106cac731 --- /dev/null +++ b/data_safe_haven/infrastructure/common/__init__.py @@ -0,0 +1,32 @@ +from .dockerhub_credentials import DockerHubCredentials +from .ip_ranges import SREDnsIpRanges, SREIpRanges +from .transformations import ( + get_address_prefixes_from_subnet, + get_available_ips_from_subnet, + get_id_from_rg, + get_id_from_subnet, + get_id_from_vnet, + get_ip_address_from_container_group, + get_ip_addresses_from_private_endpoint, + get_name_from_rg, + get_name_from_subnet, + get_name_from_vnet, + get_subscription_id_from_rg, +) + +__all__ = [ + "DockerHubCredentials", + "get_address_prefixes_from_subnet", + "get_available_ips_from_subnet", + "get_id_from_rg", + "get_id_from_subnet", + "get_id_from_vnet", + "get_ip_address_from_container_group", + "get_ip_addresses_from_private_endpoint", + "get_name_from_rg", + "get_name_from_subnet", + "get_name_from_vnet", + "get_subscription_id_from_rg", + "SREDnsIpRanges", + "SREIpRanges", +] diff --git a/data_safe_haven/infrastructure/common/dockerhub_credentials.py b/data_safe_haven/infrastructure/common/dockerhub_credentials.py new file mode 100644 index 0000000000..b41255fcf7 --- /dev/null +++ b/data_safe_haven/infrastructure/common/dockerhub_credentials.py @@ -0,0 +1,8 @@ +from dataclasses import dataclass + + +@dataclass +class DockerHubCredentials: + access_token: str + server: str + username: str 
diff --git a/data_safe_haven/infrastructure/common/ip_ranges.py b/data_safe_haven/infrastructure/common/ip_ranges.py new file mode 100644 index 0000000000..f0613e577a --- /dev/null +++ b/data_safe_haven/infrastructure/common/ip_ranges.py @@ -0,0 +1,36 @@ +"""Calculate SRE IP address ranges for a given SRE index""" + +from dataclasses import dataclass + +from data_safe_haven.external import AzureIPv4Range + + +@dataclass(frozen=True) +class SREIpRanges: + """Calculate SRE IP address ranges for a given SRE index""" + + vnet = AzureIPv4Range("10.0.0.0", "10.0.255.255") + application_gateway = vnet.next_subnet(256) + apt_proxy_server = vnet.next_subnet(8) + clamav_mirror = vnet.next_subnet(8) + data_configuration = vnet.next_subnet(8) + data_desired_state = vnet.next_subnet(8) + data_private = vnet.next_subnet(8) + firewall = vnet.next_subnet(64) # 64 address minimum + firewall_management = vnet.next_subnet(64) # 64 address minimum + guacamole_containers = vnet.next_subnet(8) + guacamole_containers_support = vnet.next_subnet(8) + identity_containers = vnet.next_subnet(8) + monitoring = vnet.next_subnet(32) + user_services_containers = vnet.next_subnet(8) + user_services_containers_support = vnet.next_subnet(8) + user_services_databases = vnet.next_subnet(8) + user_services_software_repositories = vnet.next_subnet(8) + workspaces = vnet.next_subnet(256) + + +@dataclass(frozen=True) +class SREDnsIpRanges: + """Calculate SRE DNS IP address ranges.""" + + vnet = AzureIPv4Range("192.168.0.0", "192.168.0.7") diff --git a/data_safe_haven/infrastructure/common/transformations.py b/data_safe_haven/infrastructure/common/transformations.py new file mode 100644 index 0000000000..9813709c9e --- /dev/null +++ b/data_safe_haven/infrastructure/common/transformations.py @@ -0,0 +1,113 @@ +"""Common transformations needed when manipulating Pulumi resources""" + +from pulumi import Output +from pulumi_azure_native import containerinstance, network, resources + +from data_safe_haven.exceptions import DataSafeHavenPulumiError +from data_safe_haven.external import AzureIPv4Range + + +def get_address_prefixes_from_subnet(subnet: network.GetSubnetResult) -> list[str]: + """Get list of CIDRs belonging to this subnet""" + if address_prefixes := subnet.address_prefixes: + return [str(p) for p in address_prefixes] + if address_prefix := subnet.address_prefix: + return [address_prefix] + msg = f"Subnet '{subnet}' has no address prefix." + raise DataSafeHavenPulumiError(msg) + + +def get_available_ips_from_subnet(subnet: network.GetSubnetResult) -> list[str]: + """Get list of available IP addresses from a subnet""" + if address_prefix := subnet.address_prefix: + return [str(ip) for ip in AzureIPv4Range.from_cidr(address_prefix).available()] + return [] + + +def get_id_from_rg(rg: resources.ResourceGroup) -> Output[str]: + """Get the ID of a resource group""" + if isinstance(rg.id, Output): + return rg.id + msg = f"Resource group '{rg}' has no ID." + raise DataSafeHavenPulumiError(msg) + + +def get_id_from_subnet(subnet: network.GetSubnetResult) -> str: + """Get the ID of a subnet""" + if id_ := subnet.id: + return str(id_) + msg = f"Subnet '{subnet}' has no ID." + raise DataSafeHavenPulumiError(msg) + + +def get_id_from_vnet(vnet: network.VirtualNetwork) -> Output[str]: + """Get the ID of a virtual network""" + if isinstance(vnet.id, Output): + return vnet.id + msg = f"Virtual network '{vnet}' has no ID." 
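Because next_subnet hands out ranges in declaration order from the parent /16, the resulting CIDRs follow directly from the class body above; a quick sketch of the first few allocations:

    from data_safe_haven.infrastructure.common.ip_ranges import SREIpRanges

    print(SREIpRanges.vnet.prefix)                 # 10.0.0.0/16
    print(SREIpRanges.application_gateway.prefix)  # 10.0.0.0/24 (256 addresses)
    print(SREIpRanges.apt_proxy_server.prefix)     # 10.0.1.0/29 (8 addresses)
    print(SREIpRanges.clamav_mirror.prefix)        # 10.0.1.8/29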
+ raise DataSafeHavenPulumiError(msg) + + +def get_ip_address_from_container_group( + container_group: containerinstance.ContainerGroup, +) -> Output[str]: + """Get the IP address of a container group""" + return container_group.ip_address.apply( + lambda ip_address: ( + (ip_address.ip if ip_address.ip else "") if ip_address else "" + ) + ) + + +def get_ip_addresses_from_private_endpoint( + endpoint: network.PrivateEndpoint, +) -> Output[list[str]]: + """Get a list of IP addresses from a private endpoint""" + if isinstance(endpoint.custom_dns_configs, Output): + return endpoint.custom_dns_configs.apply( + lambda cfgs: ( + list( + { + ip_address + for cfg in cfgs + for ip_address in (cfg.ip_addresses if cfg.ip_addresses else []) + } + ) + if cfgs + else [] + ) + ) + msg = f"Private endpoint '{endpoint}' has no IP addresses." + raise DataSafeHavenPulumiError(msg) + + +def get_name_from_rg(rg: resources.ResourceGroup) -> Output[str]: + """Get the name of a resource group""" + if isinstance(rg.name, Output): + return rg.name.apply(str) + msg = f"Resource group '{rg}' has no name." + raise DataSafeHavenPulumiError(msg) + + +def get_name_from_subnet(subnet: network.GetSubnetResult) -> str: + """Get the name of a subnet""" + if name := subnet.name: + return str(name) + msg = f"Subnet '{subnet}' has no name." + raise DataSafeHavenPulumiError(msg) + + +def get_name_from_vnet(vnet: network.VirtualNetwork) -> Output[str]: + """Get the name of a virtual network""" + if isinstance(vnet.name, Output): + return vnet.name.apply(str) + msg = f"Virtual network '{vnet}' has no name." + raise DataSafeHavenPulumiError(msg) + + +def get_subscription_id_from_rg(rg: resources.ResourceGroup) -> Output[str]: + """Get the ID of a subscription from a resource group""" + if isinstance(rg.id, Output): + return rg.id.apply(lambda id_: id_.split("/resourceGroups/")[0]) + msg = f"Could not extract subscription ID from resource group '{rg}'." 
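The subscription lookup above works by string-splitting the resource group's ARM ID; with a made-up ID the transformation looks like this:

    rg_id = (
        "/subscriptions/00000000-0000-0000-0000-000000000000"
        "/resourceGroups/example-rg"
    )
    subscription_id = rg_id.split("/resourceGroups/")[0]
    # '/subscriptions/00000000-0000-0000-0000-000000000000'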
+ raise DataSafeHavenPulumiError(msg) diff --git a/data_safe_haven/infrastructure/components/__init__.py b/data_safe_haven/infrastructure/components/__init__.py new file mode 100644 index 0000000000..cc6bcb15a4 --- /dev/null +++ b/data_safe_haven/infrastructure/components/__init__.py @@ -0,0 +1,43 @@ +from .composite import ( + LinuxVMComponentProps, + LocalDnsRecordComponent, + LocalDnsRecordProps, + MicrosoftSQLDatabaseComponent, + MicrosoftSQLDatabaseProps, + PostgresqlDatabaseComponent, + PostgresqlDatabaseProps, + VMComponent, +) +from .dynamic import ( + BlobContainerAcl, + BlobContainerAclProps, + EntraApplication, + EntraApplicationProps, + FileShareFile, + FileShareFileProps, + SSLCertificate, + SSLCertificateProps, +) +from .wrapped import ( + WrappedLogAnalyticsWorkspace, +) + +__all__ = [ + "BlobContainerAcl", + "BlobContainerAclProps", + "EntraApplication", + "EntraApplicationProps", + "FileShareFile", + "FileShareFileProps", + "LinuxVMComponentProps", + "LocalDnsRecordComponent", + "LocalDnsRecordProps", + "MicrosoftSQLDatabaseComponent", + "MicrosoftSQLDatabaseProps", + "PostgresqlDatabaseComponent", + "PostgresqlDatabaseProps", + "SSLCertificate", + "SSLCertificateProps", + "VMComponent", + "WrappedLogAnalyticsWorkspace", +] diff --git a/data_safe_haven/infrastructure/components/composite/__init__.py b/data_safe_haven/infrastructure/components/composite/__init__.py new file mode 100644 index 0000000000..f111bab028 --- /dev/null +++ b/data_safe_haven/infrastructure/components/composite/__init__.py @@ -0,0 +1,18 @@ +from .local_dns_record import LocalDnsRecordComponent, LocalDnsRecordProps +from .microsoft_sql_database import ( + MicrosoftSQLDatabaseComponent, + MicrosoftSQLDatabaseProps, +) +from .postgresql_database import PostgresqlDatabaseComponent, PostgresqlDatabaseProps +from .virtual_machine import LinuxVMComponentProps, VMComponent + +__all__ = [ + "LinuxVMComponentProps", + "LocalDnsRecordComponent", + "LocalDnsRecordProps", + "MicrosoftSQLDatabaseComponent", + "MicrosoftSQLDatabaseProps", + "PostgresqlDatabaseComponent", + "PostgresqlDatabaseProps", + "VMComponent", +] diff --git a/data_safe_haven/infrastructure/components/composite/local_dns_record.py b/data_safe_haven/infrastructure/components/composite/local_dns_record.py new file mode 100644 index 0000000000..cefea5fa39 --- /dev/null +++ b/data_safe_haven/infrastructure/components/composite/local_dns_record.py @@ -0,0 +1,68 @@ +from pulumi import ComponentResource, Input, Output, ResourceOptions +from pulumi_azure_native import network + + +class LocalDnsRecordProps: + """Properties for LocalDnsRecordComponent""" + + def __init__( + self, + base_fqdn: Input[str], + private_ip_address: Input[str], + record_name: Input[str], + resource_group_name: Input[str], + ) -> None: + self.base_fqdn = base_fqdn + self.private_ip_address = private_ip_address + self.record_name = record_name + self.resource_group_name = resource_group_name + + +class LocalDnsRecordComponent(ComponentResource): + """Deploy public and private DNS records with Pulumi""" + + def __init__( + self, + name: str, + props: LocalDnsRecordProps, + opts: ResourceOptions | None = None, + ) -> None: + super().__init__("dsh:common:LocalDnsRecordComponent", name, {}, opts) + child_opts = ResourceOptions.merge(opts, ResourceOptions(parent=self)) + + # Register the resource in a private DNS zone + private_dns_record_set = network.PrivateRecordSet( + f"{self._name}_private_record_set", + a_records=[ + network.ARecordArgs( + 
ipv4_address=props.private_ip_address, + ) + ], + private_zone_name=Output.concat("privatelink.", props.base_fqdn), + record_type="A", + relative_record_set_name=props.record_name, + resource_group_name=props.resource_group_name, + ttl=30, + opts=child_opts, + ) + + # Redirect the public DNS to private DNS + public_dns_record_set = network.RecordSet( + f"{self._name}_public_record_set", + cname_record=network.CnameRecordArgs( + cname=Output.concat(props.record_name, ".privatelink.", props.base_fqdn) + ), + record_type="CNAME", + relative_record_set_name=props.record_name, + resource_group_name=props.resource_group_name, + ttl=3600, + zone_name=props.base_fqdn, + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=private_dns_record_set) + ), + ) + + # Register outputs + self.hostname = public_dns_record_set.fqdn.apply( + lambda s: s.strip(".") # strip trailing "." + ) diff --git a/data_safe_haven/infrastructure/components/composite/microsoft_sql_database.py b/data_safe_haven/infrastructure/components/composite/microsoft_sql_database.py new file mode 100644 index 0000000000..47f24b293f --- /dev/null +++ b/data_safe_haven/infrastructure/components/composite/microsoft_sql_database.py @@ -0,0 +1,112 @@ +from collections.abc import Mapping, Sequence + +from pulumi import ComponentResource, Input, Output, ResourceOptions +from pulumi_azure_native import network, sql + +from data_safe_haven.infrastructure.common import get_ip_addresses_from_private_endpoint + + +class MicrosoftSQLDatabaseProps: + """Properties for MicrosoftSQLDatabaseComponent""" + + def __init__( + self, + database_names: Input[Sequence[str]], + database_password: Input[str], + database_resource_group_name: Input[str], + database_server_name: Input[str], + database_subnet_id: Input[str], + database_username: Input[str], + location: Input[str], + ) -> None: + self.database_names = Output.from_input(database_names) + self.database_password = database_password + self.database_resource_group_name = database_resource_group_name + self.database_server_name = database_server_name + self.database_subnet_id = database_subnet_id + self.database_username = database_username + self.location = location + + +class MicrosoftSQLDatabaseComponent(ComponentResource): + """Deploy a Microsoft SQL database server with Pulumi""" + + def __init__( + self, + name: str, + props: MicrosoftSQLDatabaseProps, + opts: ResourceOptions | None = None, + tags: Input[Mapping[str, Input[str]]] | None = None, + ) -> None: + super().__init__("dsh:common:MicrosoftSQLDatabaseComponent", name, {}, opts) + child_opts = ResourceOptions.merge(opts, ResourceOptions(parent=self)) + child_tags = tags if tags else {} + + # Define a Microsoft SQL server + db_server = sql.Server( + f"{self._name}_server", + administrator_login=props.database_username, + administrator_login_password=props.database_password, + location=props.location, + minimal_tls_version=None, + public_network_access=sql.ServerNetworkAccessFlag.DISABLED, + resource_group_name=props.database_resource_group_name, + server_name=props.database_server_name, + version="12.0", + opts=child_opts, + tags=child_tags, + ) + + # Add any databases that are requested + props.database_names.apply( + lambda db_names: [ + sql.Database( + f"{self._name}_db_{db_name}", + database_name=db_name, + location=props.location, + resource_group_name=props.database_resource_group_name, + server_name=db_server.name, + sku=sql.SkuArgs( + capacity=1, + family="Gen5", + name="GP_S_Gen5", + ), + opts=ResourceOptions.merge( + 
child_opts, ResourceOptions(parent=db_server) + ), + tags=child_tags, + ) + for db_name in db_names + ] + ) + # Deploy a private endpoint for the PostgreSQL server + private_endpoint = network.PrivateEndpoint( + f"{self._name}_private_endpoint", + location=props.location, + private_endpoint_name=Output.concat( + props.database_server_name, "-endpoint" + ), + private_link_service_connections=[ + network.PrivateLinkServiceConnectionArgs( + group_ids=["sqlServer"], + name=Output.concat(props.database_server_name, "-privatelink"), + private_link_service_connection_state=network.PrivateLinkServiceConnectionStateArgs( + actions_required="None", + description="Auto-approved", + status="Approved", + ), + private_link_service_id=db_server.id, + ) + ], + resource_group_name=props.database_resource_group_name, + subnet=network.SubnetArgs(id=props.database_subnet_id), + opts=ResourceOptions.merge(child_opts, ResourceOptions(parent=db_server)), + tags=child_tags, + ) + + # Register outputs + self.db_server = db_server + self.private_endpoint = private_endpoint + self.private_ip_address = get_ip_addresses_from_private_endpoint( + private_endpoint + ).apply(lambda ips: ips[0]) diff --git a/data_safe_haven/infrastructure/components/composite/postgresql_database.py b/data_safe_haven/infrastructure/components/composite/postgresql_database.py new file mode 100644 index 0000000000..4c0b74b477 --- /dev/null +++ b/data_safe_haven/infrastructure/components/composite/postgresql_database.py @@ -0,0 +1,145 @@ +from collections.abc import Mapping, Sequence + +from pulumi import ComponentResource, Input, Output, ResourceOptions +from pulumi_azure_native import dbforpostgresql, network + +from data_safe_haven.infrastructure.common import get_ip_addresses_from_private_endpoint + + +class PostgresqlDatabaseProps: + """Properties for PostgresqlDatabaseComponent""" + + def __init__( + self, + *, + database_names: Input[Sequence[str]], + database_password: Input[str], + database_resource_group_name: Input[str], + database_server_name: Input[str], + database_subnet_id: Input[str], + database_username: Input[str], + disable_secure_transport: bool, + location: Input[str], + ) -> None: + self.database_names = Output.from_input(database_names) + self.database_password = Output.secret(database_password) + self.database_resource_group_name = database_resource_group_name + self.database_server_name = database_server_name + self.database_subnet_id = database_subnet_id + self.database_username = database_username + self.disable_secure_transport = disable_secure_transport + self.location = location + + +class PostgresqlDatabaseComponent(ComponentResource): + """Deploy PostgreSQL database server with Pulumi""" + + def __init__( + self, + name: str, + props: PostgresqlDatabaseProps, + opts: ResourceOptions | None = None, + tags: Input[Mapping[str, Input[str]]] | None = None, + ) -> None: + super().__init__("dsh:common:PostgresqlDatabaseComponent", name, {}, opts) + child_opts = ResourceOptions.merge(opts, ResourceOptions(parent=self)) + child_tags = tags if tags else {} + + # Define a PostgreSQL server + db_server = dbforpostgresql.Server( + f"{self._name}_server", + administrator_login=props.database_username, + administrator_login_password=props.database_password, + auth_config=dbforpostgresql.AuthConfigArgs( + active_directory_auth=dbforpostgresql.ActiveDirectoryAuthEnum.DISABLED, + password_auth=dbforpostgresql.PasswordAuthEnum.ENABLED, + ), + backup=dbforpostgresql.BackupArgs( + backup_retention_days=7, + 
geo_redundant_backup=dbforpostgresql.GeoRedundantBackupEnum.DISABLED, + ), + create_mode=dbforpostgresql.CreateMode.DEFAULT, + data_encryption=dbforpostgresql.DataEncryptionArgs( + type=dbforpostgresql.ArmServerKeyType.SYSTEM_MANAGED, + ), + high_availability=dbforpostgresql.HighAvailabilityArgs( + mode=dbforpostgresql.HighAvailabilityMode.DISABLED, + ), + location=props.location, + resource_group_name=props.database_resource_group_name, + server_name=props.database_server_name, + sku=dbforpostgresql.SkuArgs( + name="Standard_B2s", + tier=dbforpostgresql.SkuTier.BURSTABLE, + ), + storage=dbforpostgresql.StorageArgs( + storage_size_gb=32, + ), + version=dbforpostgresql.ServerVersion.SERVER_VERSION_14, + opts=child_opts, + tags=child_tags, + ) + # Configure require_secure_transport + if props.disable_secure_transport: + dbforpostgresql.Configuration( + f"{self._name}_secure_transport_configuration", + configuration_name="require_secure_transport", + resource_group_name=props.database_resource_group_name, + server_name=db_server.name, + source="user-override", + value="OFF", + opts=ResourceOptions.merge( + child_opts, + # Pulumi workaround for being unable to delete Configuration + # resource + # https://github.com/pulumi/pulumi-azure-native/issues/3072 + ResourceOptions(parent=db_server, retain_on_delete=True), + ), + ) + # Add any databases that are requested + props.database_names.apply( + lambda db_names: [ + dbforpostgresql.Database( + f"{self._name}_db_{db_name}", + charset="UTF8", + database_name=db_name, + resource_group_name=props.database_resource_group_name, + server_name=db_server.name, + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=db_server) + ), + ) + for db_name in db_names + ] + ) + # Deploy a private endpoint for the PostgreSQL server + private_endpoint = network.PrivateEndpoint( + f"{self._name}_private_endpoint", + location=props.location, + private_endpoint_name=Output.concat( + props.database_server_name, "-endpoint" + ), + private_link_service_connections=[ + network.PrivateLinkServiceConnectionArgs( + group_ids=["postgresqlServer"], + name=Output.concat(props.database_server_name, "-privatelink"), + private_link_service_connection_state=network.PrivateLinkServiceConnectionStateArgs( + actions_required="None", + description="Auto-approved", + status="Approved", + ), + private_link_service_id=db_server.id, + ) + ], + resource_group_name=props.database_resource_group_name, + subnet=network.SubnetArgs(id=props.database_subnet_id), + opts=ResourceOptions.merge(child_opts, ResourceOptions(parent=db_server)), + tags=child_tags, + ) + + # Register outputs + self.db_server = db_server + self.private_endpoint = private_endpoint + self.private_ip_address = get_ip_addresses_from_private_endpoint( + private_endpoint + ).apply(lambda ips: ips[0]) diff --git a/data_safe_haven/infrastructure/components/composite/virtual_machine.py b/data_safe_haven/infrastructure/components/composite/virtual_machine.py new file mode 100644 index 0000000000..71fed08246 --- /dev/null +++ b/data_safe_haven/infrastructure/components/composite/virtual_machine.py @@ -0,0 +1,282 @@ +"""Pulumi component for virtual machines""" + +from collections.abc import Mapping +from typing import Any + +from pulumi import ComponentResource, Input, Output, ResourceOptions +from pulumi_azure_native import compute, insights, maintenance, network + +from data_safe_haven.functions import replace_separators + + +class VMComponentProps: + """Properties for WindowsVMComponent""" + + image_reference_args: 
compute.ImageReferenceArgs | None + azure_monitor_extension_name: str + azure_monitor_extension_version: str + os_profile_args: compute.OSProfileArgs | None + + def __init__( + self, + admin_password: Input[str], + data_collection_rule_id: Input[str], + data_collection_endpoint_id: Input[str], + ip_address_private: Input[str], + location: Input[str], + resource_group_name: Input[str], + subnet_name: Input[str], + virtual_network_name: Input[str], + virtual_network_resource_group_name: Input[str], + vm_name: Input[str], + vm_size: Input[str], + admin_username: Input[str] | None = None, + ip_address_public: Input[bool] | None = None, + maintenance_configuration_id: Input[str] | None = None, + ) -> None: + self.admin_password = admin_password + self.admin_username = admin_username if admin_username else "dshvmadmin" + self.data_collection_rule_id = data_collection_rule_id + self.data_collection_rule_name = Output.from_input( + data_collection_rule_id + ).apply(lambda rule_id: str(rule_id).split("/")[-1]) + self.data_collection_endpoint_id = data_collection_endpoint_id + self.image_reference_args = None + self.ip_address_private = ip_address_private + self.ip_address_public = ip_address_public + self.location = location + self.maintenance_configuration_id = maintenance_configuration_id + self.os_profile_args = None + self.resource_group_name = resource_group_name + self.subnet_name = subnet_name + self.virtual_network_name = virtual_network_name + self.virtual_network_resource_group_name = virtual_network_resource_group_name + self.vm_name = vm_name + self.vm_name_underscored = Output.from_input(vm_name).apply( + lambda n: replace_separators(n, "_") + ) + self.vm_size = vm_size + + @property + def image_reference(self) -> compute.ImageReferenceArgs | None: + return self.image_reference_args + + @property + def os_profile(self) -> compute.OSProfileArgs | None: + return self.os_profile_args + + +class LinuxVMComponentProps(VMComponentProps): + """Properties for LinuxVMComponent""" + + def __init__( + self, + b64cloudinit: Input[str], + *args: Any, + **kwargs: Any, + ): + super().__init__(*args, **kwargs) + self.image_reference_args = compute.ImageReferenceArgs( + offer="0001-com-ubuntu-server-jammy", + publisher="Canonical", + sku="22_04-LTS-gen2", + version="latest", + ) + self.os_profile_args = compute.OSProfileArgs( + admin_password=self.admin_password, + admin_username=self.admin_username, + computer_name=Output.from_input(self.vm_name).apply(lambda n: n[:64]), + custom_data=Output.secret(b64cloudinit), + linux_configuration=compute.LinuxConfigurationArgs( + patch_settings=compute.LinuxPatchSettingsArgs( + assessment_mode=compute.LinuxPatchAssessmentMode.AUTOMATIC_BY_PLATFORM, + patch_mode=compute.LinuxVMGuestPatchMode.AUTOMATIC_BY_PLATFORM, + automatic_by_platform_settings=compute.LinuxVMGuestPatchAutomaticByPlatformSettingsArgs( + bypass_platform_safety_checks_on_user_schedule=True, + reboot_setting=compute.LinuxVMGuestPatchAutomaticByPlatformRebootSetting.IF_REQUIRED, + ), + ), + provision_vm_agent=True, + ), + ) + self.azure_monitor_extension_name = "AzureMonitorLinuxAgent" + self.azure_monitor_extension_version = "1.0" + + +class VMComponent(ComponentResource): + """Deploy SHM secrets with Pulumi""" + + def __init__( + self, + name: str, + props: VMComponentProps, + opts: ResourceOptions | None = None, + tags: Input[Mapping[str, Input[str]]] | None = None, + ): + super().__init__("dsh:common:VMComponent", name, {}, opts) + child_opts = ResourceOptions.merge(opts, 
ResourceOptions(parent=self)) + child_tags = tags if tags else {} + name_underscored = replace_separators(self._name, "_") + + # Retrieve existing resources + subnet = network.get_subnet_output( + subnet_name=props.subnet_name, + resource_group_name=props.virtual_network_resource_group_name, + virtual_network_name=props.virtual_network_name, + ) + + # Define public IP address if relevant + network_interface_ip_params: dict[str, Any] = {} + if props.ip_address_public: + public_ip = network.PublicIPAddress( + f"{name_underscored}_public_ip", + location=props.location, + public_ip_address_name=Output.concat(props.vm_name, "-public-ip"), + public_ip_allocation_method="Static", + resource_group_name=props.resource_group_name, + sku=network.PublicIPAddressSkuArgs( + name=network.PublicIPAddressSkuName.STANDARD + ), + opts=child_opts, + tags=child_tags, + ) + network_interface_ip_params["public_ip_address"] = ( + network.PublicIPAddressArgs(id=public_ip.id) + ) + + # Define network card + network_interface = network.NetworkInterface( + f"{name_underscored}_network_interface", + enable_accelerated_networking=True, + ip_configurations=[ + network.NetworkInterfaceIPConfigurationArgs( + name=props.vm_name_underscored.apply( + lambda n: replace_separators(f"ipconfig{n}", "") + ), + private_ip_address=props.ip_address_private, + private_ip_allocation_method=network.IPAllocationMethod.STATIC, + subnet=network.SubnetArgs(id=subnet.id), + **network_interface_ip_params, + ) + ], + location=props.location, + network_interface_name=Output.concat(props.vm_name, "-nic"), + resource_group_name=props.resource_group_name, + opts=child_opts, + tags=child_tags, + ) + + # Define virtual machine + virtual_machine = compute.VirtualMachine( + name_underscored, + diagnostics_profile=compute.DiagnosticsProfileArgs( + boot_diagnostics=compute.BootDiagnosticsArgs(enabled=True) + ), + hardware_profile=compute.HardwareProfileArgs( + vm_size=props.vm_size, + ), + location=props.location, + network_profile=compute.NetworkProfileArgs( + network_interfaces=[ + compute.NetworkInterfaceReferenceArgs( + id=network_interface.id, + primary=True, + ) + ], + ), + os_profile=props.os_profile, + resource_group_name=props.resource_group_name, + storage_profile=compute.StorageProfileArgs( + image_reference=props.image_reference, + os_disk=compute.OSDiskArgs( + caching=compute.CachingTypes.READ_WRITE, + create_option=compute.DiskCreateOptionTypes.FROM_IMAGE, + delete_option=compute.DiskDeleteOptionTypes.DELETE, + managed_disk=compute.ManagedDiskParametersArgs( + storage_account_type=compute.StorageAccountTypes.PREMIUM_LRS, + ), + name=Output.concat(props.vm_name, "-osdisk"), + ), + ), + vm_name=props.vm_name, + opts=ResourceOptions.merge( + child_opts, + ResourceOptions( + delete_before_replace=True, replace_on_changes=["os_profile"] + ), + ), + tags=child_tags, + ) + + # Register with Log Analytics workspace + compute.VirtualMachineExtension( + f"{name_underscored}_azure_monitor_extension", + auto_upgrade_minor_version=True, + enable_automatic_upgrade=True, + location=props.location, + publisher="Microsoft.Azure.Monitor", + resource_group_name=props.resource_group_name, + type=props.azure_monitor_extension_name, + type_handler_version=props.azure_monitor_extension_version, + vm_extension_name=props.azure_monitor_extension_name, + vm_name=virtual_machine.name, + opts=ResourceOptions.merge( + child_opts, + ResourceOptions(parent=virtual_machine), + ), + tags=child_tags, + ) + + # Register with maintenance configuration + 
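+        # Assigning the VM to a maintenance configuration lets Azure apply
+        # guest OS patches on that configuration's schedule, complementing the
+        # AUTOMATIC_BY_PLATFORM patch settings set in LinuxVMComponentProps.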
maintenance.ConfigurationAssignment( + f"{name_underscored}_configuration_assignment", + configuration_assignment_name=Output.concat( + props.vm_name, "-maintenance-configuration" + ), + location=props.location, + maintenance_configuration_id=props.maintenance_configuration_id, + provider_name="Microsoft.Compute", + resource_group_name=props.resource_group_name, + resource_name_=virtual_machine.name, + resource_type="VirtualMachines", + opts=ResourceOptions.merge( + child_opts, + ResourceOptions(parent=virtual_machine), + ), + ) + + # Register with data collection rule + insights.DataCollectionRuleAssociation( + f"{name_underscored}_dcra_to_dcr", + association_name=Output.concat( + props.data_collection_rule_name, "-association" # this name is required + ), + data_collection_rule_id=props.data_collection_rule_id, + resource_uri=virtual_machine.id, + opts=ResourceOptions.merge( + child_opts, + ResourceOptions(parent=virtual_machine), + ), + ) + + # Register with data collection endpoint + insights.DataCollectionRuleAssociation( + f"{name_underscored}_dcra_to_dce", + association_name="configurationAccessEndpoint", # this name is required + data_collection_endpoint_id=props.data_collection_endpoint_id, + resource_uri=virtual_machine.id, + opts=ResourceOptions.merge( + child_opts, + ResourceOptions(parent=virtual_machine), + ), + ) + + # Register outputs + self.ip_address_private: Output[str] = Output.from_input( + props.ip_address_private + ) + self.resource_group_name: Output[str] = Output.from_input( + props.resource_group_name + ) + self.vm_name: Output[str] = virtual_machine.name + self.vm_size: Output[str] = Output.from_input(props.vm_size) diff --git a/data_safe_haven/infrastructure/components/dynamic/__init__.py b/data_safe_haven/infrastructure/components/dynamic/__init__.py new file mode 100644 index 0000000000..429fc8470d --- /dev/null +++ b/data_safe_haven/infrastructure/components/dynamic/__init__.py @@ -0,0 +1,15 @@ +from .blob_container_acl import BlobContainerAcl, BlobContainerAclProps +from .entra_application import EntraApplication, EntraApplicationProps +from .file_share_file import FileShareFile, FileShareFileProps +from .ssl_certificate import SSLCertificate, SSLCertificateProps + +__all__ = [ + "BlobContainerAcl", + "BlobContainerAclProps", + "EntraApplication", + "EntraApplicationProps", + "FileShareFile", + "FileShareFileProps", + "SSLCertificate", + "SSLCertificateProps", +] diff --git a/data_safe_haven/infrastructure/components/dynamic/blob_container_acl.py b/data_safe_haven/infrastructure/components/dynamic/blob_container_acl.py new file mode 100644 index 0000000000..a1dc92f8a1 --- /dev/null +++ b/data_safe_haven/infrastructure/components/dynamic/blob_container_acl.py @@ -0,0 +1,114 @@ +"""Pulumi dynamic component for setting ACLs on an Azure blob container.""" + +from typing import Any + +from pulumi import Input, Output, ResourceOptions +from pulumi.dynamic import CreateResult, DiffResult, Resource + +from data_safe_haven.exceptions import DataSafeHavenPulumiError +from data_safe_haven.external import AzureSdk + +from .dsh_resource_provider import DshResourceProvider + + +class BlobContainerAclProps: + """Props for the BlobContainerAcl class""" + + def __init__( + self, + *, + acl_user: Input[str], + acl_group: Input[str], + acl_other: Input[str], + apply_default_permissions: bool, + container_name: Input[str], + resource_group_name: Input[str], + storage_account_name: Input[str], + subscription_name: Input[str], + ) -> None: + self.container_name = container_name + 
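+        # Build a short-form POSIX ACL string such as
+        # "user::rwx,group::r-x,other::---". When default permissions are
+        # requested, matching "default:" entries are appended so that newly
+        # created files and directories inherit the same access rules.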
acl_arguments = [ + "user::", + acl_user, + ",group::", + acl_group, + ",other::", + acl_other, + ] + if apply_default_permissions: + acl_arguments += [ + ",default:user::", + acl_user, + ",default:group::", + acl_group, + ",default:other::", + acl_other, + ] + self.desired_acl = Output.concat(*acl_arguments) + self.resource_group_name = resource_group_name + self.storage_account_name = storage_account_name + self.subscription_name = subscription_name + + +class BlobContainerAclProvider(DshResourceProvider): + def create(self, props: dict[str, Any]) -> CreateResult: + """Set ACLs for a given blob container.""" + outs = dict(**props) + try: + azure_sdk = AzureSdk(props["subscription_name"], disable_logging=True) + azure_sdk.set_blob_container_acl( + container_name=props["container_name"], + desired_acl=props["desired_acl"], + resource_group_name=props["resource_group_name"], + storage_account_name=props["storage_account_name"], + ) + except Exception as exc: + msg = f"Failed to set ACLs on storage account '{props['storage_account_name']}'." + raise DataSafeHavenPulumiError(msg) from exc + return CreateResult( + f"BlobContainerAcl-{props['container_name']}", + outs=outs, + ) + + def delete(self, id_: str, props: dict[str, Any]) -> None: + """Restore default ACLs""" + # Use `id` as a no-op to avoid ARG002 while maintaining function signature + id(id_) + try: + azure_sdk = AzureSdk(props["subscription_name"], disable_logging=True) + azure_sdk.set_blob_container_acl( + container_name=props["container_name"], + desired_acl="user::rwx,group::r-x,other::---", + resource_group_name=props["resource_group_name"], + storage_account_name=props["storage_account_name"], + ) + except Exception as exc: + msg = f"Failed to delete custom ACLs on storage account '{props['storage_account_name']}'." + raise DataSafeHavenPulumiError(msg) from exc + + def diff( + self, + id_: str, + old_props: dict[str, Any], + new_props: dict[str, Any], + ) -> DiffResult: + """Calculate diff between old and new state""" + # Use `id` as a no-op to avoid ARG002 while maintaining function signature + id(id_) + return self.partial_diff(old_props, new_props) + + def refresh(self, props: dict[str, Any]) -> dict[str, Any]: + """TODO: check whether ACLs have changed""" + return dict(**props) + + +class BlobContainerAcl(Resource): + _resource_type_name = "dsh:common:BlobContainerAcl" # set resource type + + def __init__( + self, + name: str, + props: BlobContainerAclProps, + opts: ResourceOptions | None = None, + ): + super().__init__(BlobContainerAclProvider(), name, vars(props), opts) diff --git a/data_safe_haven/infrastructure/components/dynamic/dsh_resource_provider.py b/data_safe_haven/infrastructure/components/dynamic/dsh_resource_provider.py new file mode 100644 index 0000000000..5cc64f0a8e --- /dev/null +++ b/data_safe_haven/infrastructure/components/dynamic/dsh_resource_provider.py @@ -0,0 +1,164 @@ +"""Pulumi base dynamic component.""" + +from abc import ABCMeta, abstractmethod +from collections.abc import Sequence +from typing import Any + +from pulumi.dynamic import ( + CheckResult, + CreateResult, + DiffResult, + ReadResult, + ResourceProvider, + UpdateResult, +) + + +class DshResourceProvider(ResourceProvider, metaclass=ABCMeta): + @staticmethod + def partial_diff( + old_props: dict[str, Any], + new_props: dict[str, Any], + excluded_props: Sequence[str] = [], + ) -> DiffResult: + """ + Calculate a diff between an old and new set of props, excluding any if requested. 
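+        For example, with old_props={"a": 1, "b": 2} and
+        new_props={"a": 1, "b": 3}, the result marks "b" for replacement
+        while "a" remains stable.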
+ + Args: + old_props: the outputs from the last create operation + new_props: a set of checked inputs + excluded_props: any props to exclude from the comparison + + Returns: + DiffResult: + - changes: whether any non-excluded props have changed + - replaces: any non-excluded props that have changed + - stables: any properties that are excluded or unchanged + - delete_before_replace: True + """ + # List any values that were not present in old_props or have been changed + # Exclude any from excluded_props which should not trigger a diff + altered_props = [ + property_ + for property_ in [ + key for key in new_props.keys() if key not in excluded_props + ] + if (property_ not in old_props) + or (old_props[property_] != new_props[property_]) + ] + stable_props = [ + property_ + for property_ in old_props.keys() + if property_ not in altered_props + ] + return DiffResult( + changes=(altered_props != []), # changes are needed + replaces=altered_props, # properties that cannot be updated in-place + stables=stable_props, # properties that will not change on update + delete_before_replace=True, # delete the existing resource before replacing + ) + + def check( + self, old_props: dict[str, Any], new_props: dict[str, Any] + ) -> CheckResult: + """ + Invoked on update before any other method. + Verify that the props are valid or return useful error messages if they are not. + + Returns: + CheckResult: a set of checked inputs together with any failures + """ + # Ensure that old props are up-to-date + props = self.refresh(old_props) + # Overwrite with any changes from new props + props.update(new_props) + return CheckResult(props, []) + + @abstractmethod + def create(self, props: dict[str, Any]) -> CreateResult: + """ + Invoked when the desired resource is not found in the existing state. + + Args: + props: a set of checked inputs + + Returns: + CreateResult: a unique ID for this object plus a set of output properties + """ + + @abstractmethod + def delete(self, id_: str, old_props: dict[str, Any]) -> None: + """ + Invoked when the desired resource is found in the existing state but is not wanted. + + Args: + id_: the ID of the resource + old_props: the outputs from the last create operation + """ + + @abstractmethod + def diff( + self, + id_: str, + old_props: dict[str, Any], + new_props: dict[str, Any], + ) -> DiffResult: + """ + Invoked when the desired resource is found in the existing state. + + Args: + id_: the ID of the resource + old_props: the outputs from the last create operation + new_props: a set of checked inputs + + Returns: + DiffResult: + - changes: whether changes are needed + - replaces: any changed properties that mean a replacement is needed instead of an update + - stables: any properties that have not changed + - delete_before_replace: whether to delete the old object before creating the new one + """ + + def read(self, id_: str, props: dict[str, Any]) -> ReadResult: + """ + Invoked when Pulumi needs to get data about a non-managed resource + + Args: + id_: the ID of the resource + props: a set of checked inputs used to disambiguate the request + + Returns: + CreateResult: a unique ID for this object plus a set of output properties + """ + return ReadResult(id_, self.refresh(props)) + + @abstractmethod + def refresh(self, props: dict[str, Any]) -> dict[str, Any]: + """ + Given a set of props, check whether these are still correct. 
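+        Implementations should query the remote object and update any values
+        that have drifted; both check and read rely on this method.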
+ + Returns: + dict[str, Any]: a set of props that represent the current state of the remote object + """ + return dict(**props) + + def update( + self, + id_: str, + old_props: dict[str, Any], + new_props: dict[str, Any], + ) -> UpdateResult: + """ + Invoked when the desired resource needs a change but not a replacement. + + Args: + id_: the ID of the resource + old_props: the outputs from the last create operation + new_props: a set of checked inputs + + Returns: + UpdateResult: a set of output properties + """ + self.delete(id_, old_props) + updated = self.create(new_props) + return UpdateResult(outs=updated.outs) diff --git a/data_safe_haven/infrastructure/components/dynamic/entra_application.py b/data_safe_haven/infrastructure/components/dynamic/entra_application.py new file mode 100644 index 0000000000..fd2d233137 --- /dev/null +++ b/data_safe_haven/infrastructure/components/dynamic/entra_application.py @@ -0,0 +1,191 @@ +"""Pulumi dynamic component for Entra applications.""" + +from contextlib import suppress +from typing import Any + +from pulumi import Input, Output, ResourceOptions +from pulumi.dynamic import CreateResult, DiffResult, Resource, UpdateResult + +from data_safe_haven.exceptions import DataSafeHavenMicrosoftGraphError +from data_safe_haven.external import GraphApi + +from .dsh_resource_provider import DshResourceProvider + + +class EntraApplicationProps: + """Props for the EntraApplication class""" + + def __init__( + self, + application_name: Input[str], + application_role_assignments: Input[list[str]] | None = None, + application_secret_name: Input[str] | None = None, + delegated_role_assignments: Input[list[str]] | None = None, + public_client_redirect_uri: Input[str] | None = None, + web_redirect_url: Input[str] | None = None, + ) -> None: + self.application_name = application_name + self.application_role_assignments = application_role_assignments + self.application_secret_name = application_secret_name + self.delegated_role_assignments = delegated_role_assignments + self.public_client_redirect_uri = public_client_redirect_uri + self.web_redirect_url = web_redirect_url + + +class EntraApplicationProvider(DshResourceProvider): + def __init__(self, auth_token: str): + self.auth_token = auth_token + super().__init__() + + def create(self, props: dict[str, Any]) -> CreateResult: + """Create new Entra application.""" + outs = dict(**props) + try: + graph_api = GraphApi.from_token(self.auth_token, disable_logging=True) + request_json = { + "displayName": props["application_name"], + "signInAudience": "AzureADMyOrg", + } + # Add a web redirection URL if requested + if props.get("web_redirect_url", None): + request_json["web"] = { + "redirectUris": [props["web_redirect_url"]], + "implicitGrantSettings": {"enableIdTokenIssuance": True}, + } + # Add a public client redirection URL if requested + if props.get("public_client_redirect_uri", None): + request_json["publicClient"] = { + "redirectUris": [props["public_client_redirect_uri"]], + } + json_response = graph_api.create_application( + props["application_name"], + application_scopes=props.get("application_role_assignments", []), + delegated_scopes=props.get("delegated_role_assignments", []), + request_json=request_json, + ) + outs["object_id"] = json_response["id"] + outs["application_id"] = json_response["appId"] + + # Grant requested role permissions + graph_api.grant_role_permissions( + outs["application_name"], + application_role_assignments=props.get( + "application_role_assignments", [] + ), + 
delegated_role_assignments=props.get("delegated_role_assignments", []), + ) + + # Attach an application secret if requested + outs["application_secret"] = ( + graph_api.create_application_secret( + props["application_name"], + props["application_secret_name"], + ) + if props.get("application_secret_name", None) + else "" + ) + except Exception as exc: + msg = f"Failed to create application '{props['application_name']}' in Entra ID." + raise DataSafeHavenMicrosoftGraphError(msg) from exc + return CreateResult( + f"EntraApplication-{props['application_name']}", + outs=outs, + ) + + def delete(self, id_: str, props: dict[str, Any]) -> None: + """Delete an Entra application.""" + # Use `id` as a no-op to avoid ARG002 while maintaining function signature + id(id_) + try: + graph_api = GraphApi.from_token(self.auth_token, disable_logging=True) + graph_api.delete_application(props["application_name"]) + except Exception as exc: + msg = f"Failed to delete application '{props['application_name']}' from Entra ID." + raise DataSafeHavenMicrosoftGraphError(msg) from exc + + def diff( + self, + id_: str, + old_props: dict[str, Any], + new_props: dict[str, Any], + ) -> DiffResult: + """Calculate diff between old and new state""" + # Use `id` as a no-op to avoid ARG002 while maintaining function signature + id(id_) + # We exclude '__provider' from the diff. This is a Base64-encoded pickle of this + # EntraApplicationProvider instance. This means that it contains self.auth_token + # and would otherwise trigger a diff each time the auth_token changes. Note that + # ignoring '__provider' could cause issues if the structure of this class + # changes in any other way, but this could be fixed by manually deleting the + # application in the Entra directory. + return self.partial_diff(old_props, new_props, excluded_props=["__provider"]) + + def refresh(self, props: dict[str, Any]) -> dict[str, Any]: + try: + outs = dict(**props) + with suppress(DataSafeHavenMicrosoftGraphError, KeyError): + graph_api = GraphApi.from_token(self.auth_token, disable_logging=True) + if json_response := graph_api.get_application_by_name( + outs["application_name"] + ): + outs["object_id"] = json_response["id"] + outs["application_id"] = json_response["appId"] + + # Ensure that requested role permissions have been granted + graph_api.grant_role_permissions( + outs["application_name"], + application_role_assignments=props.get( + "application_role_assignments", [] + ), + delegated_role_assignments=props.get( + "delegated_role_assignments", [] + ), + ) + return outs + except Exception as exc: + msg = f"Failed to refresh application '{props['application_name']}' in Entra ID." + raise DataSafeHavenMicrosoftGraphError(msg) from exc + + def update( + self, + id_: str, + old_props: dict[str, Any], + new_props: dict[str, Any], + ) -> UpdateResult: + """Updating is deleting followed by creating.""" + try: + # Delete the old application, using the auth token from new_props + old_props_ = {**old_props} + self.delete(id_, old_props_) + # Create a new application + updated = self.create(new_props) + return UpdateResult(outs=updated.outs) + except Exception as exc: + msg = f"Failed to update application '{new_props['application_name']}' in Entra ID." 
+ raise DataSafeHavenMicrosoftGraphError(msg) from exc + + +class EntraApplication(Resource): + application_id: Output[str] + application_secret: Output[str] + object_id: Output[str] + _resource_type_name = "dsh:common:EntraApplication" # set resource type + + def __init__( + self, + name: str, + props: EntraApplicationProps, + auth_token: str, + opts: ResourceOptions | None = None, + ): + super().__init__( + EntraApplicationProvider(auth_token), + name, + { + "application_id": None, + "application_secret": None, + "object_id": None, + **vars(props), + }, + opts, + ) diff --git a/data_safe_haven/infrastructure/components/dynamic/file_share_file.py b/data_safe_haven/infrastructure/components/dynamic/file_share_file.py new file mode 100644 index 0000000000..67d7d90414 --- /dev/null +++ b/data_safe_haven/infrastructure/components/dynamic/file_share_file.py @@ -0,0 +1,148 @@ +"""Pulumi dynamic component for files uploaded to an Azure FileShare.""" + +from contextlib import suppress +from typing import Any + +from azure.core.exceptions import ResourceNotFoundError, ServiceRequestError +from azure.storage.fileshare import ShareDirectoryClient, ShareFileClient +from pulumi import Input, Output, ResourceOptions +from pulumi.dynamic import CreateResult, DiffResult, Resource + +from data_safe_haven.exceptions import DataSafeHavenAzureError + +from .dsh_resource_provider import DshResourceProvider + + +class FileShareFileProps: + """Props for the FileShareFile class""" + + def __init__( + self, + destination_path: Input[str], + file_contents: Input[str], + share_name: Input[str], + storage_account_key: Input[str], + storage_account_name: Input[str], + ) -> None: + self.destination_path = destination_path + self.file_contents = file_contents + self.share_name = share_name + self.storage_account_key = storage_account_key + self.storage_account_name = storage_account_name + + +class FileShareFileProvider(DshResourceProvider): + @staticmethod + def file_exists(file_client: ShareFileClient) -> bool: + with suppress(ResourceNotFoundError, ServiceRequestError): + file_client.get_file_properties() + return True + return False + + @staticmethod + def get_file_client( + storage_account_name: str, + storage_account_key: str, + share_name: str, + destination_path: str, + ) -> ShareFileClient: + tokens = destination_path.split("/") + directory = "/".join(tokens[:-1]) + file_name = tokens[-1] + if directory: + directory_client = ShareDirectoryClient( + account_url=f"https://{storage_account_name}.file.core.windows.net", + share_name=share_name, + directory_path=directory, + credential=storage_account_key, + ) + if not directory_client.exists(): + directory_client.create_directory() + return directory_client.get_file_client(file_name) + return ShareFileClient( + account_url=f"https://{storage_account_name}.file.core.windows.net", + share_name=share_name, + file_path=file_name, + credential=storage_account_key, + ) + + def create(self, props: dict[str, Any]) -> CreateResult: + """Create file in target storage account with specified contents.""" + outs = dict(**props) + file_client: ShareFileClient | None = None + try: + file_client = self.get_file_client( + props["storage_account_name"], + props["storage_account_key"], + props["share_name"], + props["destination_path"], + ) + file_client.upload_file(props["file_contents"].encode("utf-8")) + outs["file_name"] = file_client.file_name + except Exception as exc: + file_name = file_client.file_name if file_client else "" + msg = f"Failed to upload data to '{file_name}' 
in [green]{props['share_name']}[/]." + raise DataSafeHavenAzureError(msg) from exc + return CreateResult( + f"filesharefile-{props['destination_path'].replace('/', '-')}", + outs=outs, + ) + + def delete(self, id_: str, props: dict[str, Any]) -> None: + """Delete a file from the target storage account""" + # Use `id` as a no-op to avoid ARG002 while maintaining function signature + id(id_) + file_client: ShareFileClient | None = None + try: + file_client = self.get_file_client( + props["storage_account_name"], + props["storage_account_key"], + props["share_name"], + props["destination_path"], + ) + if self.file_exists(file_client): + file_client.close_all_handles() + file_client.delete_file() + except Exception as exc: + file_name = file_client.file_name if file_client else "" + msg = f"Failed to delete file '{file_name}' in [green]{props['share_name']}[/]." + raise DataSafeHavenAzureError(msg) from exc + + def diff( + self, + id_: str, + old_props: dict[str, Any], + new_props: dict[str, Any], + ) -> DiffResult: + """Calculate diff between old and new state""" + # Use `id` as a no-op to avoid ARG002 while maintaining function signature + id(id_) + # Exclude "storage_account_key" which should not trigger a diff + return self.partial_diff(old_props, new_props, ["storage_account_key"]) + + def refresh(self, props: dict[str, Any]) -> dict[str, Any]: + with suppress(Exception): + file_client = FileShareFileProvider.get_file_client( + props["storage_account_name"], + props["storage_account_key"], + props["share_name"], + props["destination_path"], + ) + if not FileShareFileProvider.file_exists(file_client): + props["file_name"] = "" + return dict(**props) + + +class FileShareFile(Resource): + file_name: Output[str] + _resource_type_name = "dsh:common:FileShareFile" # set resource type + + def __init__( + self, + name: str, + props: FileShareFileProps, + opts: ResourceOptions | None = None, + ): + super().__init__( + FileShareFileProvider(), name, {"file_name": None, **vars(props)}, opts + ) diff --git a/data_safe_haven/infrastructure/components/dynamic/ssl_certificate.py b/data_safe_haven/infrastructure/components/dynamic/ssl_certificate.py new file mode 100644 index 0000000000..b80e6a7dbe --- /dev/null +++ b/data_safe_haven/infrastructure/components/dynamic/ssl_certificate.py @@ -0,0 +1,199 @@ +"""Pulumi dynamic component for SSL certificates uploaded to an Azure KeyVault.""" + +import time +from contextlib import suppress +from typing import Any + +from acme.errors import ValidationError +from cryptography.hazmat.primitives.asymmetric.rsa import RSAPrivateKey +from cryptography.hazmat.primitives.serialization import ( + NoEncryption, + load_pem_private_key, + pkcs12, +) +from cryptography.x509 import load_pem_x509_certificate +from pulumi import Input, Output, ResourceOptions +from pulumi.dynamic import CreateResult, DiffResult, Resource +from simple_acme_dns import ACMEClient + +from data_safe_haven.exceptions import DataSafeHavenAzureError, DataSafeHavenSSLError +from data_safe_haven.external import AzureSdk + +from .dsh_resource_provider import DshResourceProvider + + +class SSLCertificateProps: + """Props for the SSLCertificate class""" + + def __init__( + self, + certificate_secret_name: Input[str], + domain_name: Input[str], + admin_email_address: Input[str], + key_vault_name: Input[str], + networking_resource_group_name: Input[str], + subscription_name: Input[str], + ) -> None: + self.certificate_secret_name = certificate_secret_name + self.domain_name = domain_name + 
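+        # Email address registered with the ACME (Let's Encrypt) account used
+        # to request the certificate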
self.admin_email_address = admin_email_address + self.key_vault_name = key_vault_name + self.networking_resource_group_name = networking_resource_group_name + self.subscription_name = subscription_name + + +class SSLCertificateProvider(DshResourceProvider): + def create(self, props: dict[str, Any]) -> CreateResult: + """Create new SSL certificate.""" + outs = dict(**props) + try: + client = ACMEClient( + domains=[props["domain_name"]], + email=props["admin_email_address"], + directory="https://acme-v02.api.letsencrypt.org/directory", + nameservers=["8.8.8.8", "1.1.1.1"], + new_account=True, + ) + # Generate private key and CSR + # Note that we must set the key to RSA-2048 before generating the CSR + # The default is ecdsa-with-SHA25, which Azure Key Vault cannot read + private_key_bytes = client.generate_private_key(key_type="rsa2048") + client.generate_csr() + # Request DNS verification tokens and add them to the DNS record + azure_sdk = AzureSdk(props["subscription_name"], disable_logging=True) + verification_tokens = client.request_verification_tokens().items() + for record_name, record_values in verification_tokens: + record_set = azure_sdk.ensure_dns_txt_record( + record_name=record_name.replace(f".{props['domain_name']}", ""), + record_value=record_values[0], + resource_group_name=props["networking_resource_group_name"], + zone_name=props["domain_name"], + ) + # Wait for DNS propagation to complete + if not client.check_dns_propagation( + authoritative=False, round_robin=True, verbose=False + ): + msg = "DNS propagation failed" + raise DataSafeHavenSSLError(msg) + # Wait for the TTL for this record to expire to remove risk of caching + time.sleep(record_set.ttl or 30) + # Request a signed certificate + try: + certificate_bytes = client.request_certificate() + except ValidationError as exc: + msg = "\n".join( + ["ACME validation error:"] + + [str(auth_error) for auth_error in exc.failed_authzrs] + + [ + f"TXT record {record_name} is currently set to {record_values}" + for (record_name, record_values) in verification_tokens + ] + ) + raise DataSafeHavenSSLError(msg) from exc + # Although KeyVault will accept a PEM certificate (where we simply prepend + # the private key) we need a PFX certificate for compatibility with + # ApplicationGateway + private_key = load_pem_private_key(private_key_bytes, None) + if not isinstance(private_key, RSAPrivateKey): + msg = f"Private key is of type {type(private_key)} not RSAPrivateKey." + raise TypeError(msg) + all_certs = [ + load_pem_x509_certificate(data) + for data in certificate_bytes.split(b"\n\n") + ] + certificate = next( + cert for cert in all_certs if props["domain_name"] in str(cert.subject) + ) + ca_certs = [cert for cert in all_certs if cert != certificate] + pfx_bytes = pkcs12.serialize_key_and_certificates( + props["certificate_secret_name"].encode("utf-8"), + private_key, + certificate, + ca_certs, + NoEncryption(), + ) + # Add certificate to KeyVault + kvcert = azure_sdk.import_keyvault_certificate( + certificate_name=props["certificate_secret_name"], + certificate_contents=pfx_bytes, + key_vault_name=props["key_vault_name"], + ) + outs["secret_id"] = kvcert.secret_id + except Exception as exc: + cert_name = f"[green]{props['certificate_secret_name']}[/]" + domain_name = f"[green]{props['domain_name']}[/]" + msg = f"Failed to create SSL certificate {cert_name} for {domain_name}." 
+ raise DataSafeHavenSSLError(msg) from exc + return CreateResult( + f"SSLCertificate-{props['certificate_secret_name']}", + outs=outs, + ) + + def delete(self, id_: str, props: dict[str, Any]) -> None: + """Delete an SSL certificate.""" + # Use `id` as a no-op to avoid ARG002 while maintaining function signature + id(id_) + try: + # Remove the DNS record + azure_sdk = AzureSdk(props["subscription_name"], disable_logging=True) + azure_sdk.remove_dns_txt_record( + record_name="_acme_challenge", + resource_group_name=props["networking_resource_group_name"], + zone_name=props["domain_name"], + ) + # Remove the Key Vault certificate + azure_sdk.remove_keyvault_certificate( + certificate_name=props["certificate_secret_name"], + key_vault_name=props["key_vault_name"], + ) + except Exception as exc: + cert_name = f"[green]{props['certificate_secret_name']}[/]" + domain_name = f"[green]{props['domain_name']}[/]" + msg = f"Failed to delete SSL certificate {cert_name} for {domain_name}." + raise DataSafeHavenSSLError(msg) from exc + + def diff( + self, + id_: str, + old_props: dict[str, Any], + new_props: dict[str, Any], + ) -> DiffResult: + """Calculate diff between old and new state""" + # Use `id` as a no-op to avoid ARG002 while maintaining function signature + id(id_) + return self.partial_diff(old_props, new_props, []) + + def refresh(self, props: dict[str, Any]) -> dict[str, Any]: + try: + outs = dict(**props) + with suppress(DataSafeHavenAzureError, KeyError): + azure_sdk = AzureSdk(outs["subscription_name"], disable_logging=True) + certificate = azure_sdk.get_keyvault_certificate( + outs["certificate_secret_name"], outs["key_vault_name"] + ) + if certificate.secret_id: + outs["secret_id"] = certificate.secret_id + return outs + except Exception as exc: + cert_name = f"[green]{props['certificate_secret_name']}[/]" + domain_name = f"[green]{props['domain_name']}[/]" + msg = f"Failed to refresh SSL certificate {cert_name} for {domain_name}." 
+ raise DataSafeHavenSSLError(msg) from exc + + +class SSLCertificate(Resource): + _resource_type_name = "dsh:common:SSLCertificate" # set resource type + secret_id: Output[str] + + def __init__( + self, + name: str, + props: SSLCertificateProps, + opts: ResourceOptions | None = None, + ): + super().__init__( + SSLCertificateProvider(), + name, + {"secret_id": None, **vars(props)}, + opts, + ) diff --git a/data_safe_haven/infrastructure/components/wrapped/__init__.py b/data_safe_haven/infrastructure/components/wrapped/__init__.py new file mode 100644 index 0000000000..fc5f8c8f61 --- /dev/null +++ b/data_safe_haven/infrastructure/components/wrapped/__init__.py @@ -0,0 +1,5 @@ +from .log_analytics_workspace import WrappedLogAnalyticsWorkspace + +__all__ = [ + "WrappedLogAnalyticsWorkspace", +] diff --git a/data_safe_haven/infrastructure/components/wrapped/log_analytics_workspace.py b/data_safe_haven/infrastructure/components/wrapped/log_analytics_workspace.py new file mode 100644 index 0000000000..34df0422e7 --- /dev/null +++ b/data_safe_haven/infrastructure/components/wrapped/log_analytics_workspace.py @@ -0,0 +1,65 @@ +"""Wrapper for the Pulumi Log Analytics Workspace component""" + +from collections.abc import Mapping + +import pulumi +from pulumi_azure_native import operationalinsights + + +class WrappedLogAnalyticsWorkspace(operationalinsights.Workspace): + def __init__( + self, + resource_name: str, + *, + location: pulumi.Input[str], + resource_group_name: pulumi.Input[str], + retention_in_days: pulumi.Input[int], + sku: pulumi.Input[operationalinsights.WorkspaceSkuArgs], + workspace_name: pulumi.Input[str], + opts: pulumi.ResourceOptions, + tags: pulumi.Input[Mapping[str, pulumi.Input[str]]], + ): + self.resource_group_name_ = pulumi.Output.from_input(resource_group_name) + super().__init__( + resource_name=resource_name, + location=location, + resource_group_name=resource_group_name, + retention_in_days=retention_in_days, + sku=sku, + workspace_name=workspace_name, + opts=opts, + tags=tags, + ) + + @property + def resource_group_name(self) -> pulumi.Output[str]: + """ + Gets the name of the resource group where this log analytics workspace is deployed. + """ + return self.resource_group_name_ + + @property + def workspace_id(self) -> pulumi.Output[str]: + """ + Gets the ID of this workspace. + """ + return self.customer_id + + @property + def workspace_key(self) -> pulumi.Output[str]: + """ + Gets the key for this workspace. 
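+
+        The shared key is retrieved with get_shared_keys_output and wrapped in
+        pulumi.Output.secret so that Pulumi treats it as a secret value.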
+ """ + workspace_keys: pulumi.Output[operationalinsights.GetSharedKeysResult] = ( + pulumi.Output.all( + resource_group_name=self.resource_group_name, + workspace_name=self.name, + ).apply(lambda kwargs: operationalinsights.get_shared_keys_output(**kwargs)) + ) + return pulumi.Output.secret( + workspace_keys.apply( + lambda keys: ( + keys.primary_shared_key if keys.primary_shared_key else "UNKNOWN" + ) + ) + ) diff --git a/data_safe_haven/infrastructure/programs/__init__.py b/data_safe_haven/infrastructure/programs/__init__.py new file mode 100644 index 0000000000..48c81e511e --- /dev/null +++ b/data_safe_haven/infrastructure/programs/__init__.py @@ -0,0 +1,7 @@ +from .declarative_sre import DeclarativeSRE +from .imperative_shm import ImperativeSHM + +__all__ = [ + "DeclarativeSRE", + "ImperativeSHM", +] diff --git a/data_safe_haven/infrastructure/programs/declarative_sre.py b/data_safe_haven/infrastructure/programs/declarative_sre.py new file mode 100644 index 0000000000..614d8ac2b2 --- /dev/null +++ b/data_safe_haven/infrastructure/programs/declarative_sre.py @@ -0,0 +1,389 @@ +"""Pulumi declarative program""" + +import pulumi +from pulumi_azure_native import resources + +from data_safe_haven.config import Context, SREConfig +from data_safe_haven.functions import replace_separators +from data_safe_haven.infrastructure.common import DockerHubCredentials + +from .sre.application_gateway import ( + SREApplicationGatewayComponent, + SREApplicationGatewayProps, +) +from .sre.apt_proxy_server import SREAptProxyServerComponent, SREAptProxyServerProps +from .sre.backup import SREBackupComponent, SREBackupProps +from .sre.clamav_mirror import SREClamAVMirrorComponent, SREClamAVMirrorProps +from .sre.data import SREDataComponent, SREDataProps +from .sre.dns_server import SREDnsServerComponent, SREDnsServerProps +from .sre.firewall import SREFirewallComponent, SREFirewallProps +from .sre.identity import SREIdentityComponent, SREIdentityProps +from .sre.monitoring import SREMonitoringComponent, SREMonitoringProps +from .sre.networking import SRENetworkingComponent, SRENetworkingProps +from .sre.remote_desktop import SRERemoteDesktopComponent, SRERemoteDesktopProps +from .sre.user_services import SREUserServicesComponent, SREUserServicesProps +from .sre.workspaces import SREWorkspacesComponent, SREWorkspacesProps + + +class DeclarativeSRE: + """Deploy with Pulumi""" + + def __init__( + self, + context: Context, + config: SREConfig, + graph_api_token: str, + ) -> None: + self.context = context + self.config = config + self.graph_api_token = graph_api_token + self.stack_name = replace_separators( + f"shm-{context.name}-sre-{config.name}", "-" + ) + self.tags = { + "deployed with": "Pulumi", + "sre_name": f"SRE {config.name}", + } | context.tags + + def __call__(self) -> None: + # Load pulumi configuration options + self.pulumi_opts = pulumi.Config() + shm_admin_group_id = self.pulumi_opts.require("shm-admin-group-id") + shm_entra_tenant_id = self.pulumi_opts.require("shm-entra-tenant-id") + shm_fqdn = self.pulumi_opts.require("shm-fqdn") + + # Construct DockerHubCredentials + dockerhub_credentials = DockerHubCredentials( + access_token=self.config.dockerhub.access_token, + server="index.docker.io", + username=self.config.dockerhub.username, + ) + + # Construct LDAP paths + ldap_root_dn = f"DC={shm_fqdn.replace('.', ',DC=')}" + ldap_group_search_base = f"OU=groups,{ldap_root_dn}" + ldap_user_search_base = f"OU=users,{ldap_root_dn}" + ldap_group_name_prefix = f"Data Safe Haven SRE {self.config.name}" + 
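+        # Three security groups are defined per SRE (administrators, privileged
+        # users and standard users); the LDAP filters below restrict access to
+        # members of these groups.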
ldap_group_names = { + "admin_group_name": f"{ldap_group_name_prefix} Administrators", + "privileged_user_group_name": f"{ldap_group_name_prefix} Privileged Users", + "user_group_name": f"{ldap_group_name_prefix} Users", + } + ldap_username_attribute = "uid" + # LDAP filter syntax: https://ldap.com/ldap-filters/ + # LDAP filter for users of this SRE + ldap_user_filter = "".join( + [ + "(&", + # Users are a posixAccount and + "(objectClass=posixAccount)", + # belong to any of these groups + "(|", + *( + f"(memberOf=CN={group_name},{ldap_group_search_base})" + for group_name in ldap_group_names.values() + ), + ")", + ")", + ] + ) + # LDAP filter for groups in this SRE + ldap_group_filter = "".join( + [ + "(&", + # Groups are a posixGroup + "(objectClass=posixGroup)", + "(|", + # which is either one of the LDAP groups + *(f"(CN={group_name})" for group_name in ldap_group_names.values()), + # or is the primary user group for a member of one of those groups + *( + f"(memberOf=CN=Primary user groups for {group_name},{ldap_group_search_base})" + for group_name in ldap_group_names.values() + ), + ")", + ")", + ] + ) + + # Deploy resource group + resource_group = resources.ResourceGroup( + "sre_resource_group", + location=self.config.azure.location, + resource_group_name=f"{self.stack_name}-rg", + tags=self.tags, + ) + + # Deploy SRE DNS server + dns = SREDnsServerComponent( + "sre_dns_server", + self.stack_name, + SREDnsServerProps( + dockerhub_credentials=dockerhub_credentials, + location=self.config.azure.location, + resource_group_name=resource_group.name, + shm_fqdn=shm_fqdn, + ), + tags=self.tags, + ) + + # Deploy networking + networking = SRENetworkingComponent( + "sre_networking", + self.stack_name, + SRENetworkingProps( + dns_private_zones=dns.private_zones, + dns_server_ip=dns.ip_address, + dns_virtual_network=dns.virtual_network, + location=self.config.azure.location, + resource_group_name=resource_group.name, + shm_fqdn=shm_fqdn, + shm_resource_group_name=self.context.resource_group_name, + shm_zone_name=shm_fqdn, + sre_name=self.config.name, + user_public_ip_ranges=self.config.sre.research_user_ip_addresses, + ), + tags=self.tags, + ) + + # Deploy SRE firewall + SREFirewallComponent( + "sre_firewall", + self.stack_name, + SREFirewallProps( + location=self.config.azure.location, + resource_group_name=resource_group.name, + route_table_name=networking.route_table_name, + subnet_apt_proxy_server=networking.subnet_apt_proxy_server, + subnet_clamav_mirror=networking.subnet_clamav_mirror, + subnet_firewall=networking.subnet_firewall, + subnet_firewall_management=networking.subnet_firewall_management, + subnet_guacamole_containers=networking.subnet_guacamole_containers, + subnet_identity_containers=networking.subnet_identity_containers, + subnet_user_services_software_repositories=networking.subnet_user_services_software_repositories, + subnet_workspaces=networking.subnet_workspaces, + ), + tags=self.tags, + ) + + # Deploy data storage + data = SREDataComponent( + "sre_data", + self.stack_name, + SREDataProps( + admin_email_address=self.config.sre.admin_email_address, + admin_group_id=shm_admin_group_id, + admin_ip_addresses=self.config.sre.admin_ip_addresses, + data_provider_ip_addresses=self.config.sre.data_provider_ip_addresses, + dns_private_zones=dns.private_zones, + dns_record=networking.shm_ns_record, + dns_server_admin_password=dns.password_admin, + location=self.config.azure.location, + resource_group=resource_group, + sre_fqdn=networking.sre_fqdn, + 
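+                # Subnets that will be granted access to the data storage accounts
+                # (used below in SREDataComponent for network rules and private endpoints)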
subnet_data_configuration=networking.subnet_data_configuration, + subnet_data_desired_state=networking.subnet_data_desired_state, + subnet_data_private=networking.subnet_data_private, + subscription_id=self.config.azure.subscription_id, + subscription_name=self.context.subscription_name, + tenant_id=self.config.azure.tenant_id, + ), + tags=self.tags, + ) + + # Deploy the apt proxy server + apt_proxy_server = SREAptProxyServerComponent( + "sre_apt_proxy_server", + self.stack_name, + SREAptProxyServerProps( + containers_subnet=networking.subnet_apt_proxy_server, + dns_server_ip=dns.ip_address, + location=self.config.azure.location, + resource_group_name=resource_group.name, + sre_fqdn=networking.sre_fqdn, + storage_account_key=data.storage_account_data_configuration_key, + storage_account_name=data.storage_account_data_configuration_name, + ), + tags=self.tags, + ) + + # Deploy the ClamAV mirror server + clamav_mirror = SREClamAVMirrorComponent( + "sre_clamav_mirror", + self.stack_name, + SREClamAVMirrorProps( + dns_server_ip=dns.ip_address, + dockerhub_credentials=dockerhub_credentials, + location=self.config.azure.location, + resource_group_name=resource_group.name, + sre_fqdn=networking.sre_fqdn, + storage_account_key=data.storage_account_data_configuration_key, + storage_account_name=data.storage_account_data_configuration_name, + subnet=networking.subnet_clamav_mirror, + ), + tags=self.tags, + ) + + # Deploy identity server + identity = SREIdentityComponent( + "sre_identity", + self.stack_name, + SREIdentityProps( + dns_server_ip=dns.ip_address, + dockerhub_credentials=dockerhub_credentials, + entra_application_name=f"sre-{self.config.name}-apricot", + entra_auth_token=self.graph_api_token, + entra_tenant_id=shm_entra_tenant_id, + location=self.config.azure.location, + resource_group_name=resource_group.name, + shm_fqdn=shm_fqdn, + sre_fqdn=networking.sre_fqdn, + storage_account_key=data.storage_account_data_configuration_key, + storage_account_name=data.storage_account_data_configuration_name, + subnet_containers=networking.subnet_identity_containers, + ), + tags=self.tags, + ) + + # Deploy frontend application gateway + SREApplicationGatewayComponent( + "sre_application_gateway", + self.stack_name, + SREApplicationGatewayProps( + key_vault_certificate_id=data.sre_fqdn_certificate_secret_id, + key_vault_identity=data.managed_identity, + location=self.config.azure.location, + resource_group=resource_group, + subnet_application_gateway=networking.subnet_application_gateway, + subnet_guacamole_containers=networking.subnet_guacamole_containers, + sre_fqdn=networking.sre_fqdn, + ), + tags=self.tags, + ) + + # Deploy containerised remote desktop gateway + remote_desktop = SRERemoteDesktopComponent( + "sre_remote_desktop", + self.stack_name, + SRERemoteDesktopProps( + allow_copy=self.config.sre.remote_desktop.allow_copy, + allow_paste=self.config.sre.remote_desktop.allow_paste, + database_password=data.password_user_database_admin, + dns_server_ip=dns.ip_address, + dockerhub_credentials=dockerhub_credentials, + entra_application_fqdn=networking.sre_fqdn, + entra_application_name=f"sre-{self.config.name}-guacamole", + entra_auth_token=self.graph_api_token, + entra_tenant_id=shm_entra_tenant_id, + ldap_group_filter=ldap_group_filter, + ldap_group_search_base=ldap_group_search_base, + ldap_server_hostname=identity.hostname, + ldap_server_port=identity.server_port, + ldap_user_filter=ldap_user_filter, + ldap_user_search_base=ldap_user_search_base, + location=self.config.azure.location, + 
resource_group_name=resource_group.name, + storage_account_key=data.storage_account_data_configuration_key, + storage_account_name=data.storage_account_data_configuration_name, + subnet_guacamole_containers_support=networking.subnet_guacamole_containers_support, + subnet_guacamole_containers=networking.subnet_guacamole_containers, + ), + tags=self.tags, + ) + + # Deploy containerised user services + user_services = SREUserServicesComponent( + "sre_user_services", + self.stack_name, + SREUserServicesProps( + database_service_admin_password=data.password_database_service_admin, + databases=self.config.sre.databases, + dns_server_ip=dns.ip_address, + dockerhub_credentials=dockerhub_credentials, + gitea_database_password=data.password_gitea_database_admin, + hedgedoc_database_password=data.password_hedgedoc_database_admin, + ldap_server_hostname=identity.hostname, + ldap_server_port=identity.server_port, + ldap_user_filter=ldap_user_filter, + ldap_username_attribute=ldap_username_attribute, + ldap_user_search_base=ldap_user_search_base, + location=self.config.azure.location, + nexus_admin_password=data.password_nexus_admin, + resource_group_name=resource_group.name, + software_packages=self.config.sre.software_packages, + sre_fqdn=networking.sre_fqdn, + storage_account_key=data.storage_account_data_configuration_key, + storage_account_name=data.storage_account_data_configuration_name, + subnet_containers=networking.subnet_user_services_containers, + subnet_containers_support=networking.subnet_user_services_containers_support, + subnet_databases=networking.subnet_user_services_databases, + subnet_software_repositories=networking.subnet_user_services_software_repositories, + ), + tags=self.tags, + ) + + # Deploy monitoring + monitoring = SREMonitoringComponent( + "sre_monitoring", + self.stack_name, + SREMonitoringProps( + dns_private_zones=dns.private_zones, + location=self.config.azure.location, + resource_group_name=resource_group.name, + subnet=networking.subnet_monitoring, + timezone=self.config.sre.timezone, + ), + tags=self.tags, + ) + + # Deploy workspaces + workspaces = SREWorkspacesComponent( + "sre_workspaces", + self.stack_name, + SREWorkspacesProps( + admin_password=data.password_workspace_admin, + apt_proxy_server_hostname=apt_proxy_server.hostname, + clamav_mirror_hostname=clamav_mirror.hostname, + data_collection_rule_id=monitoring.data_collection_rule_vms.id, + data_collection_endpoint_id=monitoring.data_collection_endpoint.id, + database_service_admin_password=data.password_database_service_admin, + ldap_group_filter=ldap_group_filter, + ldap_group_search_base=ldap_group_search_base, + ldap_server_hostname=identity.hostname, + ldap_server_port=identity.server_port, + ldap_user_filter=ldap_user_filter, + ldap_user_search_base=ldap_user_search_base, + location=self.config.azure.location, + maintenance_configuration_id=monitoring.maintenance_configuration.id, + resource_group_name=resource_group.name, + software_repository_hostname=user_services.software_repositories.hostname, + sre_name=self.config.name, + storage_account_data_desired_state_name=data.storage_account_data_desired_state_name, + storage_account_data_private_user_name=data.storage_account_data_private_user_name, + storage_account_data_private_sensitive_name=data.storage_account_data_private_sensitive_name, + subnet_workspaces=networking.subnet_workspaces, + subscription_name=self.context.subscription_name, + virtual_network=networking.virtual_network, + vm_details=list(enumerate(self.config.sre.workspace_skus)), + ), 
+ tags=self.tags, + ) + + # Deploy backup service + SREBackupComponent( + "sre_backup", + self.stack_name, + SREBackupProps( + location=self.config.azure.location, + resource_group_name=resource_group.name, + storage_account_data_private_sensitive_id=data.storage_account_data_private_sensitive_id, + storage_account_data_private_sensitive_name=data.storage_account_data_private_sensitive_name, + ), + tags=self.tags, + ) + + # Export values for later use + pulumi.export("data", data.exports) + pulumi.export("ldap", ldap_group_names) + pulumi.export("remote_desktop", remote_desktop.exports) + pulumi.export("workspaces", workspaces.exports) diff --git a/data_safe_haven/infrastructure/programs/imperative_shm.py b/data_safe_haven/infrastructure/programs/imperative_shm.py new file mode 100644 index 0000000000..b13ec1680e --- /dev/null +++ b/data_safe_haven/infrastructure/programs/imperative_shm.py @@ -0,0 +1,160 @@ +from data_safe_haven.config import Context, SHMConfig +from data_safe_haven.exceptions import ( + DataSafeHavenAzureError, + DataSafeHavenMicrosoftGraphError, +) +from data_safe_haven.external import AzureSdk, GraphApi +from data_safe_haven.logging import get_logger + + +class ImperativeSHM: + """Azure resources to support Data Safe Haven context""" + + def __init__(self, context: Context, config: SHMConfig) -> None: + self.azure_sdk_: AzureSdk | None = None + self.config = config + self.context = context + self.tags = {"component": "SHM", "deployed with": "Python"} | context.tags + + @property + def azure_sdk(self) -> AzureSdk: + """Load AzureAPI on demand + + Returns: + AzureSdk: An initialised AzureSdk object + """ + if not self.azure_sdk_: + self.azure_sdk_ = AzureSdk( + subscription_name=self.context.subscription_name, + ) + return self.azure_sdk_ + + def deploy(self) -> None: + """Deploy all desired resources + + Raises: + DataSafeHavenAzureError if any resources cannot be created + """ + logger = get_logger() + logger.info(f"Preparing to deploy [green]{self.context.description}[/] SHM.") + # Deploy the resources needed by Pulumi + try: + resource_group = self.azure_sdk.ensure_resource_group( + location=self.config.azure.location, + resource_group_name=self.context.resource_group_name, + tags=self.tags, + ) + if not resource_group.name: + msg = f"Resource group '{self.context.resource_group_name}' was not created." + raise DataSafeHavenAzureError(msg) + identity = self.azure_sdk.ensure_managed_identity( + identity_name=self.context.managed_identity_name, + location=resource_group.location, + resource_group_name=resource_group.name, + ) + storage_account = self.azure_sdk.ensure_storage_account( + location=resource_group.location, + resource_group_name=resource_group.name, + storage_account_name=self.context.storage_account_name, + tags=self.tags, + ) + if not storage_account.name: + msg = f"Storage account '{self.context.storage_account_name}' was not created." 
+ raise DataSafeHavenAzureError(msg) + _ = self.azure_sdk.ensure_storage_blob_container( + container_name=self.context.storage_container_name, + resource_group_name=resource_group.name, + storage_account_name=storage_account.name, + ) + _ = self.azure_sdk.ensure_storage_blob_container( + container_name=self.context.pulumi_storage_container_name, + resource_group_name=resource_group.name, + storage_account_name=storage_account.name, + ) + keyvault = self.azure_sdk.ensure_keyvault( + admin_group_id=self.config.shm.admin_group_id, + key_vault_name=self.context.key_vault_name, + location=resource_group.location, + managed_identity=identity, + resource_group_name=resource_group.name, + tags=self.tags, + ) + if not keyvault.name: + msg = f"Keyvault '{self.context.key_vault_name}' was not created." + raise DataSafeHavenAzureError(msg) + self.azure_sdk.ensure_keyvault_key( + key_name=self.context.pulumi_encryption_key_name, + key_vault_name=keyvault.name, + ) + except DataSafeHavenAzureError as exc: + msg = "Failed to deploy resources needed by Pulumi." + raise DataSafeHavenAzureError(msg) from exc + + # Deploy common resources that will be needed by SREs + try: + zone = self.azure_sdk.ensure_dns_zone( + resource_group_name=resource_group.name, + zone_name=self.config.shm.fqdn, + tags=self.tags, + ) + if not zone.name_servers: + msg = f"DNS zone '{self.config.shm.fqdn}' was not created." + raise DataSafeHavenAzureError(msg) + nameservers = [str(n) for n in zone.name_servers] + self.azure_sdk.ensure_dns_caa_record( + record_flags=0, + record_name="@", + record_tag="issue", + record_value="letsencrypt.org", + resource_group_name=resource_group.name, + ttl=3600, + zone_name=self.config.shm.fqdn, + ) + except DataSafeHavenAzureError as exc: + msg = "Failed to create SHM resources." + raise DataSafeHavenAzureError(msg) from exc + + # Add the SHM domain to the Entra ID via interactive GraphAPI + try: + # Generate the verification record + graph_api = GraphApi.from_scopes( + scopes=[ + "Application.ReadWrite.All", + "Domain.ReadWrite.All", + "Group.ReadWrite.All", + ], + tenant_id=self.config.shm.entra_tenant_id, + ) + verification_record = graph_api.add_custom_domain(self.config.shm.fqdn) + # Add the record to DNS + self.azure_sdk.ensure_dns_txt_record( + record_name="@", + record_value=verification_record, + resource_group_name=resource_group.name, + ttl=3600, + zone_name=self.config.shm.fqdn, + ) + # Verify the record + graph_api.verify_custom_domain( + self.config.shm.fqdn, + nameservers, + ) + except (DataSafeHavenMicrosoftGraphError, DataSafeHavenAzureError) as exc: + msg = f"Failed to add custom domain '{self.config.shm.fqdn}' to Entra ID." + raise DataSafeHavenAzureError(msg) from exc + + def teardown(self) -> None: + """Destroy all created resources + + Raises: + DataSafeHavenAzureError if any resources cannot be destroyed + """ + logger = get_logger() + try: + logger.info( + f"Removing [green]{self.context.description}[/] resource group {self.context.resource_group_name}." + ) + self.azure_sdk.remove_resource_group(self.context.resource_group_name) + except DataSafeHavenAzureError as exc: + msg = "Failed to destroy context resources." 
+ raise DataSafeHavenAzureError(msg) from exc diff --git a/data_safe_haven/infrastructure/programs/sre/__init__.py b/data_safe_haven/infrastructure/programs/sre/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/data_safe_haven/infrastructure/programs/sre/application_gateway.py b/data_safe_haven/infrastructure/programs/sre/application_gateway.py new file mode 100644 index 0000000000..5eadd4e148 --- /dev/null +++ b/data_safe_haven/infrastructure/programs/sre/application_gateway.py @@ -0,0 +1,295 @@ +"""Pulumi component for SRE application gateway""" + +from collections.abc import Mapping +from typing import Any + +from pulumi import ComponentResource, Input, Output, ResourceOptions +from pulumi_azure_native import managedidentity, network, resources + +from data_safe_haven.infrastructure.common import ( + get_available_ips_from_subnet, + get_id_from_rg, + get_id_from_subnet, + get_name_from_rg, +) + + +class SREApplicationGatewayProps: + """Properties for SREApplicationGatewayComponent""" + + user_assigned_identities: Output[dict[str, dict[Any, Any]]] + + def __init__( + self, + key_vault_certificate_id: Input[str], + key_vault_identity: Input[managedidentity.UserAssignedIdentity], + location: Input[str], + resource_group: Input[resources.ResourceGroup], + sre_fqdn: Input[str], + subnet_application_gateway: Input[network.GetSubnetResult], + subnet_guacamole_containers: Input[network.GetSubnetResult], + ) -> None: + self.key_vault_certificate_id = key_vault_certificate_id + self.location = location + self.resource_group_id = Output.from_input(resource_group).apply(get_id_from_rg) + self.resource_group_name = Output.from_input(resource_group).apply( + get_name_from_rg + ) + self.sre_fqdn = sre_fqdn + self.subnet_application_gateway_id = Output.from_input( + subnet_application_gateway + ).apply(get_id_from_subnet) + self.subnet_guacamole_containers_ip_addresses = Output.from_input( + subnet_guacamole_containers + ).apply(get_available_ips_from_subnet) + # Unwrap key vault identity so that it has the required type + self.user_assigned_identities = Output.from_input(key_vault_identity).apply( + lambda identity: identity.id.apply(lambda id_: {str(id_): {}}) + ) + + +class SREApplicationGatewayComponent(ComponentResource): + """Deploy application gateway with Pulumi""" + + def __init__( + self, + name: str, + stack_name: str, + props: SREApplicationGatewayProps, + opts: ResourceOptions | None = None, + tags: Input[Mapping[str, Input[str]]] | None = None, + ) -> None: + super().__init__("dsh:sre:ApplicationGatewayComponent", name, {}, opts) + child_opts = ResourceOptions.merge(opts, ResourceOptions(parent=self)) + child_tags = {"component": "application gateway"} | (tags if tags else {}) + + # Define public IP address + public_ip = network.PublicIPAddress( + f"{self._name}_public_ip", + location=props.location, + public_ip_address_name=f"{stack_name}-public-ip", + public_ip_allocation_method=network.IpAllocationMethod.STATIC, + resource_group_name=props.resource_group_name, + sku=network.PublicIPAddressSkuArgs( + name=network.PublicIPAddressSkuName.STANDARD + ), + opts=child_opts, + tags=child_tags, + ) + + # Link the public IP address to the SRE domain + network.RecordSet( + f"{self._name}_a_record", + a_records=public_ip.ip_address.apply( + lambda ip: [network.ARecordArgs(ipv4_address=ip)] if ip else [] + ), + record_type="A", + relative_record_set_name="@", + resource_group_name=props.resource_group_name, + ttl=30, + zone_name=props.sre_fqdn, + opts=child_opts, + ) + + # 
Define application gateway + application_gateway_name = f"{stack_name}-ag-entrypoint" + self.application_gateway = network.ApplicationGateway( + f"{self._name}_application_gateway", + application_gateway_name=application_gateway_name, + backend_address_pools=[ + # Guacamole private IP addresses + network.ApplicationGatewayBackendAddressPoolArgs( + backend_addresses=props.subnet_guacamole_containers_ip_addresses.apply( + lambda ip_addresses: [ + network.ApplicationGatewayBackendAddressArgs( + ip_address=ip_address + ) + for ip_address in ip_addresses + ] + ), + name="appGatewayBackendGuacamole", + ), + ], + backend_http_settings_collection=[ + network.ApplicationGatewayBackendHttpSettingsArgs( + cookie_based_affinity=network.ApplicationGatewayCookieBasedAffinity.ENABLED, + name="appGatewayBackendHttpSettings", + port=80, + protocol="Http", + request_timeout=30, + ), + ], + frontend_ip_configurations=[ + network.ApplicationGatewayFrontendIPConfigurationArgs( + name="appGatewayFrontendIP", + private_ip_allocation_method="Dynamic", + public_ip_address=network.SubResourceArgs(id=public_ip.id), + ) + ], + frontend_ports=[ + network.ApplicationGatewayFrontendPortArgs( + name="appGatewayFrontendHttp", + port=80, + ), + network.ApplicationGatewayFrontendPortArgs( + name="appGatewayFrontendHttps", + port=443, + ), + ], + gateway_ip_configurations=[ + network.ApplicationGatewayIPConfigurationArgs( + name="appGatewayIP", + subnet=network.SubResourceArgs( + id=props.subnet_application_gateway_id + ), + ) + ], + http_listeners=[ + # Guacamole http listener + network.ApplicationGatewayHttpListenerArgs( + frontend_ip_configuration=network.SubResourceArgs( + id=Output.concat( + props.resource_group_id, + f"/providers/Microsoft.Network/applicationGateways/{application_gateway_name}/frontendIPConfigurations/appGatewayFrontendIP", + ) + ), + frontend_port=network.SubResourceArgs( + id=Output.concat( + props.resource_group_id, + f"/providers/Microsoft.Network/applicationGateways/{application_gateway_name}/frontendPorts/appGatewayFrontendHttp", + ) + ), + host_name=props.sre_fqdn, + name="GuacamoleHttpListener", + protocol="Http", + ), + # Guacamole https listener + network.ApplicationGatewayHttpListenerArgs( + frontend_ip_configuration=network.SubResourceArgs( + id=Output.concat( + props.resource_group_id, + f"/providers/Microsoft.Network/applicationGateways/{application_gateway_name}/frontendIPConfigurations/appGatewayFrontendIP", + ) + ), + frontend_port=network.SubResourceArgs( + id=Output.concat( + props.resource_group_id, + f"/providers/Microsoft.Network/applicationGateways/{application_gateway_name}/frontendPorts/appGatewayFrontendHttps", + ) + ), + host_name=props.sre_fqdn, + name="GuacamoleHttpsListener", + protocol="Https", + ssl_certificate=network.SubResourceArgs( + id=Output.concat( + props.resource_group_id, + f"/providers/Microsoft.Network/applicationGateways/{application_gateway_name}/sslCertificates/letsencryptcertificate", + ), + ), + ), + ], + identity=network.ManagedServiceIdentityArgs( + type=network.ResourceIdentityType.USER_ASSIGNED, + user_assigned_identities=props.user_assigned_identities, + ), + location=props.location, + redirect_configurations=[ + # Guacamole HTTP redirect + network.ApplicationGatewayRedirectConfigurationArgs( + include_path=True, + include_query_string=True, + name="GuacamoleHttpToHttpsRedirection", + redirect_type="Permanent", + request_routing_rules=[ + network.SubResourceArgs( + id=Output.concat( + props.resource_group_id, + 
f"/providers/Microsoft.Network/applicationGateways/{application_gateway_name}/requestRoutingRules/HttpToHttpsRedirection", + ), + ) + ], + target_listener=network.SubResourceArgs( + id=Output.concat( + props.resource_group_id, + f"/providers/Microsoft.Network/applicationGateways/{application_gateway_name}/httpListeners/GuacamoleHttpsListener", + ) + ), + ), + ], + request_routing_rules=[ + # Guacamole routing + network.ApplicationGatewayRequestRoutingRuleArgs( + http_listener=network.SubResourceArgs( + id=Output.concat( + props.resource_group_id, + f"/providers/Microsoft.Network/applicationGateways/{application_gateway_name}/httpListeners/GuacamoleHttpListener", + ) + ), + redirect_configuration=network.SubResourceArgs( + id=Output.concat( + props.resource_group_id, + f"/providers/Microsoft.Network/applicationGateways/{application_gateway_name}/redirectConfigurations/GuacamoleHttpToHttpsRedirection", + ) + ), + name="GuacamoleHttpRouting", + priority=200, + rule_type="Basic", + ), + network.ApplicationGatewayRequestRoutingRuleArgs( + backend_address_pool=network.SubResourceArgs( + id=Output.concat( + props.resource_group_id, + f"/providers/Microsoft.Network/applicationGateways/{application_gateway_name}/backendAddressPools/appGatewayBackendGuacamole", + ) + ), + backend_http_settings=network.SubResourceArgs( + id=Output.concat( + props.resource_group_id, + f"/providers/Microsoft.Network/applicationGateways/{application_gateway_name}/backendHttpSettingsCollection/appGatewayBackendHttpSettings", + ) + ), + http_listener=network.SubResourceArgs( + id=Output.concat( + props.resource_group_id, + f"/providers/Microsoft.Network/applicationGateways/{application_gateway_name}/httpListeners/GuacamoleHttpsListener", + ) + ), + name="GuacamoleHttpsRouting", + priority=100, + rule_type="Basic", + ), + ], + resource_group_name=props.resource_group_name, + sku=network.ApplicationGatewaySkuArgs( + capacity=1, + name="Standard_v2", + tier="Standard_v2", + ), + ssl_certificates=[ + network.ApplicationGatewaySslCertificateArgs( + key_vault_secret_id=props.key_vault_certificate_id, + name="letsencryptcertificate", + ), + ], + ssl_policy=network.ApplicationGatewaySslPolicyArgs( + # We start with the Mozilla (https://wiki.mozilla.org/Security/Server_Side_TLS) + # recommended SSL ciphers then remove and not in this supported ciphers list + # (https://learn.microsoft.com/en-us/azure/application-gateway/application-gateway-ssl-policy-overview#cipher-suites) + # + # The following TLSv1.3 ciphers are always included: + # - TLS_AES_256_GCM_SHA384 + # - TLS_AES_128_GCM_SHA256 + # + # Ordering is important: earlier ciphers will be tried first. 
+ cipher_suites=[ + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + ], + min_protocol_version=network.ApplicationGatewaySslProtocol.TL_SV1_2, + policy_type="CustomV2", + ), + opts=child_opts, + tags=child_tags, + ) diff --git a/data_safe_haven/infrastructure/programs/sre/apt_proxy_server.py b/data_safe_haven/infrastructure/programs/sre/apt_proxy_server.py new file mode 100644 index 0000000000..0a6964772d --- /dev/null +++ b/data_safe_haven/infrastructure/programs/sre/apt_proxy_server.py @@ -0,0 +1,187 @@ +from collections.abc import Mapping + +from pulumi import ComponentResource, Input, Output, ResourceOptions +from pulumi_azure_native import containerinstance, storage + +from data_safe_haven.infrastructure.common import ( + get_id_from_subnet, + get_ip_address_from_container_group, +) +from data_safe_haven.infrastructure.components import ( + FileShareFile, + FileShareFileProps, + LocalDnsRecordComponent, + LocalDnsRecordProps, +) +from data_safe_haven.types import PermittedDomains + + +class SREAptProxyServerProps: + """Properties for SREAptProxyServerComponent""" + + def __init__( + self, + containers_subnet: Input[str], + dns_server_ip: Input[str], + location: Input[str], + resource_group_name: Input[str], + sre_fqdn: Input[str], + storage_account_key: Input[str], + storage_account_name: Input[str], + ) -> None: + self.containers_subnet_id = Output.from_input(containers_subnet).apply( + get_id_from_subnet + ) + self.dns_server_ip = dns_server_ip + self.location = location + self.resource_group_name = resource_group_name + self.sre_fqdn = sre_fqdn + self.storage_account_key = storage_account_key + self.storage_account_name = storage_account_name + + +class SREAptProxyServerComponent(ComponentResource): + """Deploy APT proxy server with Pulumi""" + + def __init__( + self, + name: str, + stack_name: str, + props: SREAptProxyServerProps, + opts: ResourceOptions | None = None, + tags: Input[Mapping[str, Input[str]]] | None = None, + ) -> None: + super().__init__("dsh:sre:AptProxyServerComponent", name, {}, opts) + child_opts = ResourceOptions.merge(opts, ResourceOptions(parent=self)) + child_tags = {"component": "apt proxy server"} | (tags if tags else {}) + + # Define configuration file shares + file_share_apt_proxy_server = storage.FileShare( + f"{self._name}_file_share_apt_proxy_server", + access_tier=storage.ShareAccessTier.COOL, + account_name=props.storage_account_name, + resource_group_name=props.resource_group_name, + share_name="apt-proxy-server", + share_quota=1, + signed_identifiers=[], + opts=child_opts, + ) + + # Upload allowed repositories + repositories = "\n".join(PermittedDomains.APT_REPOSITORIES) + file_share_apt_proxy_server_repositories = FileShareFile( + f"{self._name}_file_share_apt_proxy_server_repositories", + FileShareFileProps( + destination_path="repositories.acl", + share_name=file_share_apt_proxy_server.name, + file_contents=repositories, + storage_account_key=props.storage_account_key, + storage_account_name=props.storage_account_name, + ), + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=file_share_apt_proxy_server) + ), + ) + + # Define the container group with squid-deb-proxy + container_group = containerinstance.ContainerGroup( + f"{self._name}_container_group", + container_group_name=f"{stack_name}-container-group-apt-proxy-server", + containers=[ + containerinstance.ContainerArgs( + 
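+                    # squid-deb-proxy caches apt packages and only proxies the
+                    # repositories listed in repositories.acl, which is mounted
+                    # read-only below from the 'apt-proxy-server' file share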
image="ghcr.io/alan-turing-institute/squid-deb-proxy:0.0.1", + name="squid-deb-proxy"[:63], + environment_variables=[], + # All Azure Container Instances need to expose port 80 on at least + # one container. In this case, there is nothing there. + ports=[ + containerinstance.ContainerPortArgs( + port=80, + protocol=containerinstance.ContainerGroupNetworkProtocol.TCP, + ), + containerinstance.ContainerPortArgs( + port=8000, + protocol=containerinstance.ContainerGroupNetworkProtocol.TCP, + ), + ], + resources=containerinstance.ResourceRequirementsArgs( + requests=containerinstance.ResourceRequestsArgs( + cpu=1, + memory_in_gb=1, + ), + ), + volume_mounts=[ + containerinstance.VolumeMountArgs( + mount_path="/app/allowlists", + name="proxy-app-allowlists", + read_only=True, + ), + ], + ), + ], + dns_config=containerinstance.DnsConfigurationArgs( + name_servers=[props.dns_server_ip], + ), + ip_address=containerinstance.IpAddressArgs( + ports=[ + containerinstance.PortArgs( + port=80, + protocol=containerinstance.ContainerGroupNetworkProtocol.TCP, + ), + containerinstance.PortArgs( + port=8000, + protocol=containerinstance.ContainerGroupNetworkProtocol.TCP, + ), + ], + type=containerinstance.ContainerGroupIpAddressType.PRIVATE, + ), + location=props.location, + os_type=containerinstance.OperatingSystemTypes.LINUX, + resource_group_name=props.resource_group_name, + restart_policy=containerinstance.ContainerGroupRestartPolicy.ALWAYS, + sku=containerinstance.ContainerGroupSku.STANDARD, + subnet_ids=[ + containerinstance.ContainerGroupSubnetIdArgs( + id=props.containers_subnet_id + ) + ], + volumes=[ + containerinstance.VolumeArgs( + azure_file=containerinstance.AzureFileVolumeArgs( + share_name=file_share_apt_proxy_server.name, + storage_account_key=props.storage_account_key, + storage_account_name=props.storage_account_name, + ), + name="proxy-app-allowlists", + ), + ], + opts=ResourceOptions.merge( + child_opts, + ResourceOptions( + delete_before_replace=True, + depends_on=[ + file_share_apt_proxy_server, + file_share_apt_proxy_server_repositories, + ], + replace_on_changes=["containers"], + ), + ), + tags=child_tags, + ) + + # Register the container group in the SRE DNS zone + local_dns = LocalDnsRecordComponent( + f"{self._name}_apt_proxy_server_dns_record_set", + LocalDnsRecordProps( + base_fqdn=props.sre_fqdn, + private_ip_address=get_ip_address_from_container_group(container_group), + record_name="apt", + resource_group_name=props.resource_group_name, + ), + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=container_group) + ), + ) + + # Register outputs + self.hostname = local_dns.hostname diff --git a/data_safe_haven/infrastructure/programs/sre/backup.py b/data_safe_haven/infrastructure/programs/sre/backup.py new file mode 100644 index 0000000000..e4ed5723f3 --- /dev/null +++ b/data_safe_haven/infrastructure/programs/sre/backup.py @@ -0,0 +1,194 @@ +"""Pulumi component for SRE backup""" + +from collections.abc import Mapping + +from pulumi import ComponentResource, Input, ResourceOptions +from pulumi_azure_native import dataprotection + + +class SREBackupProps: + """Properties for SREBackupComponent""" + + def __init__( + self, + location: Input[str], + resource_group_name: Input[str], + storage_account_data_private_sensitive_id: Input[str], + storage_account_data_private_sensitive_name: Input[str], + ) -> None: + self.location = location + self.resource_group_name = resource_group_name + self.storage_account_data_private_sensitive_id = ( + 
storage_account_data_private_sensitive_id + ) + self.storage_account_data_private_sensitive_name = ( + storage_account_data_private_sensitive_name + ) + + +class SREBackupComponent(ComponentResource): + """Deploy SRE backup with Pulumi""" + + def __init__( + self, + name: str, + stack_name: str, + props: SREBackupProps, + opts: ResourceOptions | None = None, + tags: Input[Mapping[str, Input[str]]] | None = None, + ) -> None: + super().__init__("dsh:sre:BackupComponent", name, {}, opts) + child_opts = ResourceOptions.merge(opts, ResourceOptions(parent=self)) + child_tags = {"component": "backup"} | (tags if tags else {}) + + # Deploy backup vault + backup_vault = dataprotection.BackupVault( + f"{self._name}_backup_vault", + identity=dataprotection.DppIdentityDetailsArgs( + type="SystemAssigned", + ), + location=props.location, + properties=dataprotection.BackupVaultArgs( + storage_settings=[ + dataprotection.StorageSettingArgs( + datastore_type=dataprotection.StorageSettingStoreTypes.VAULT_STORE, + type=dataprotection.StorageSettingTypes.LOCALLY_REDUNDANT, + ) + ], + ), + resource_group_name=props.resource_group_name, + vault_name=f"{stack_name}-bv-backup", + opts=child_opts, + tags=child_tags, + ) + + # Backup policy for blobs + backup_policy_blobs = dataprotection.BackupPolicy( + f"{self._name}_backup_policy_blobs", + backup_policy_name="backup-policy-blobs", + properties=dataprotection.BackupPolicyArgs( + datasource_types=["Microsoft.Storage/storageAccounts/blobServices"], + object_type="BackupPolicy", + policy_rules=[ + # Retain for 30 days + dataprotection.AzureRetentionRuleArgs( + is_default=True, + lifecycles=[ + dataprotection.SourceLifeCycleArgs( + delete_after=dataprotection.AbsoluteDeleteOptionArgs( + duration="P30D", + object_type="AbsoluteDeleteOption", + ), + source_data_store=dataprotection.DataStoreInfoBaseArgs( + data_store_type=dataprotection.DataStoreTypes.OPERATIONAL_STORE, + object_type="DataStoreInfoBase", + ), + ), + ], + name="Default", + object_type="AzureRetentionRule", + ), + ], + ), + resource_group_name=props.resource_group_name, + vault_name=backup_vault.name, + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=backup_vault) + ), + ) + + # Backup policy for disks + dataprotection.BackupPolicy( + f"{self._name}_backup_policy_disks", + backup_policy_name="backup-policy-disks", + properties=dataprotection.BackupPolicyArgs( + datasource_types=["Microsoft.Compute/disks"], + object_type="BackupPolicy", + policy_rules=[ + # Backup at 02:00 every day + dataprotection.AzureBackupRuleArgs( + backup_parameters=dataprotection.AzureBackupParamsArgs( + backup_type="Incremental", + object_type="AzureBackupParams", + ), + data_store=dataprotection.DataStoreInfoBaseArgs( + data_store_type=dataprotection.DataStoreTypes.OPERATIONAL_STORE, + object_type="DataStoreInfoBase", + ), + name="BackupDaily", + object_type="AzureBackupRule", + trigger=dataprotection.ScheduleBasedTriggerContextArgs( + object_type="ScheduleBasedTriggerContext", + schedule=dataprotection.BackupScheduleArgs( + repeating_time_intervals=[ + "R/2023-01-01T02:00:00+00:00/P1D" + ], + ), + tagging_criteria=[ + dataprotection.TaggingCriteriaArgs( + is_default=True, + tag_info=dataprotection.RetentionTagArgs( + tag_name="Default", + ), + tagging_priority=99, + ) + ], + ), + ), + # Retain for 30 days + dataprotection.AzureRetentionRuleArgs( + is_default=True, + lifecycles=[ + dataprotection.SourceLifeCycleArgs( + delete_after=dataprotection.AbsoluteDeleteOptionArgs( + duration="P30D", + 
object_type="AbsoluteDeleteOption", + ), + source_data_store=dataprotection.DataStoreInfoBaseArgs( + data_store_type=dataprotection.DataStoreTypes.OPERATIONAL_STORE, + object_type="DataStoreInfoBase", + ), + ), + ], + name="Default", + object_type="AzureRetentionRule", + ), + ], + ), + resource_group_name=props.resource_group_name, + vault_name=backup_vault.name, + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=backup_vault) + ), + ) + + # Backup instance for blobs + dataprotection.BackupInstance( + f"{self._name}_backup_instance_blobs", + backup_instance_name="backup-instance-blobs", + properties=dataprotection.BackupInstanceArgs( + data_source_info=dataprotection.DatasourceArgs( + resource_id=props.storage_account_data_private_sensitive_id, + datasource_type="Microsoft.Storage/storageAccounts/blobServices", + object_type="Datasource", + resource_location=props.location, + resource_name=props.storage_account_data_private_sensitive_name, + resource_type="Microsoft.Storage/storageAccounts", + resource_uri=props.storage_account_data_private_sensitive_id, + ), + object_type="BackupInstance", + policy_info=dataprotection.PolicyInfoArgs( + policy_id=backup_policy_blobs.id, + ), + friendly_name="BlobBackupSensitiveData", + ), + resource_group_name=props.resource_group_name, + vault_name=backup_vault.name, + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=backup_policy_blobs) + ), + ) + + # Backup instance for disks + # We currently have no disks except OS disks so no backup is needed + # This may change in future, so we leave the policy above diff --git a/data_safe_haven/infrastructure/programs/sre/clamav_mirror.py b/data_safe_haven/infrastructure/programs/sre/clamav_mirror.py new file mode 100644 index 0000000000..afc47ac31d --- /dev/null +++ b/data_safe_haven/infrastructure/programs/sre/clamav_mirror.py @@ -0,0 +1,161 @@ +from collections.abc import Mapping + +from pulumi import ComponentResource, Input, Output, ResourceOptions +from pulumi_azure_native import containerinstance, network, storage + +from data_safe_haven.infrastructure.common import ( + DockerHubCredentials, + get_id_from_subnet, + get_ip_address_from_container_group, +) +from data_safe_haven.infrastructure.components import ( + LocalDnsRecordComponent, + LocalDnsRecordProps, +) + + +class SREClamAVMirrorProps: + """Properties for SREClamAVMirrorComponent""" + + def __init__( + self, + dns_server_ip: Input[str], + dockerhub_credentials: DockerHubCredentials, + location: Input[str], + resource_group_name: Input[str], + sre_fqdn: Input[str], + storage_account_key: Input[str], + storage_account_name: Input[str], + subnet: Input[network.GetSubnetResult], + ) -> None: + self.dns_server_ip = dns_server_ip + self.dockerhub_credentials = dockerhub_credentials + self.location = location + self.resource_group_name = resource_group_name + self.sre_fqdn = sre_fqdn + self.storage_account_key = storage_account_key + self.storage_account_name = storage_account_name + self.subnet_id = Output.from_input(subnet).apply(get_id_from_subnet) + + +class SREClamAVMirrorComponent(ComponentResource): + """Deploy ClamAV mirror with Pulumi""" + + def __init__( + self, + name: str, + stack_name: str, + props: SREClamAVMirrorProps, + opts: ResourceOptions | None = None, + tags: Input[Mapping[str, Input[str]]] | None = None, + ) -> None: + super().__init__("dsh:sre:ClamAVMirrorComponent", name, {}, opts) + child_opts = ResourceOptions.merge(opts, ResourceOptions(parent=self)) + child_tags = tags if tags else {} + + # Define 
configuration file shares + file_share_clamav_mirror = storage.FileShare( + f"{self._name}_file_share_clamav_mirror", + access_tier=storage.ShareAccessTier.COOL, + account_name=props.storage_account_name, + resource_group_name=props.resource_group_name, + share_name="clamav-mirror", + share_quota=1, + signed_identifiers=[], + opts=child_opts, + ) + + # Define the container group with ClamAV + container_group = containerinstance.ContainerGroup( + f"{self._name}_container_group", + container_group_name=f"{stack_name}-container-group-clamav", + containers=[ + containerinstance.ContainerArgs( + image="chmey/clamav-mirror", + name="clamav-mirror"[:63], + environment_variables=[], + ports=[ + containerinstance.ContainerPortArgs( + port=80, + protocol=containerinstance.ContainerGroupNetworkProtocol.TCP, + ), + ], + resources=containerinstance.ResourceRequirementsArgs( + requests=containerinstance.ResourceRequestsArgs( + cpu=2, + memory_in_gb=2, + ), + ), + volume_mounts=[ + containerinstance.VolumeMountArgs( + mount_path="/clamav", + name="clamavmirror-clamavmirror-clamav", + read_only=False, + ), + ], + ), + ], + dns_config=containerinstance.DnsConfigurationArgs( + name_servers=[props.dns_server_ip], + ), + # Required due to DockerHub rate-limit: https://docs.docker.com/docker-hub/download-rate-limit/ + image_registry_credentials=[ + { + "password": Output.secret(props.dockerhub_credentials.access_token), + "server": props.dockerhub_credentials.server, + "username": props.dockerhub_credentials.username, + } + ], + ip_address=containerinstance.IpAddressArgs( + ports=[ + containerinstance.PortArgs( + port=80, + protocol=containerinstance.ContainerGroupNetworkProtocol.TCP, + ) + ], + type=containerinstance.ContainerGroupIpAddressType.PRIVATE, + ), + location=props.location, + os_type=containerinstance.OperatingSystemTypes.LINUX, + resource_group_name=props.resource_group_name, + restart_policy=containerinstance.ContainerGroupRestartPolicy.ALWAYS, + sku=containerinstance.ContainerGroupSku.STANDARD, + subnet_ids=[ + containerinstance.ContainerGroupSubnetIdArgs(id=props.subnet_id), + ], + volumes=[ + containerinstance.VolumeArgs( + azure_file=containerinstance.AzureFileVolumeArgs( + share_name=file_share_clamav_mirror.name, + storage_account_key=props.storage_account_key, + storage_account_name=props.storage_account_name, + ), + name="clamavmirror-clamavmirror-clamav", + ), + ], + opts=ResourceOptions.merge( + child_opts, + ResourceOptions( + delete_before_replace=True, + replace_on_changes=["containers"], + ), + ), + tags=child_tags, + ) + + # Register the container group in the SRE DNS zone + local_dns = LocalDnsRecordComponent( + f"{self._name}_clamav_mirror_dns_record_set", + LocalDnsRecordProps( + base_fqdn=props.sre_fqdn, + private_ip_address=get_ip_address_from_container_group(container_group), + record_name="clamav", + resource_group_name=props.resource_group_name, + ), + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=container_group) + ), + ) + + # Register outputs + self.hostname = local_dns.hostname diff --git a/data_safe_haven/infrastructure/programs/sre/data.py b/data_safe_haven/infrastructure/programs/sre/data.py new file mode 100644 index 0000000000..524d743d44 --- /dev/null +++ b/data_safe_haven/infrastructure/programs/sre/data.py @@ -0,0 +1,942 @@ +"""Pulumi component for SRE data""" + +from collections.abc import Mapping, Sequence +from typing import ClassVar + +import pulumi_random +from pulumi import ComponentResource, FileAsset, Input, Output, ResourceOptions 
+from pulumi_azure_native import ( + authorization, + keyvault, + managedidentity, + network, + resources, + storage, +) + +from data_safe_haven.external import AzureIPv4Range +from data_safe_haven.functions import ( + alphanumeric, + get_key_vault_name, + replace_separators, + seeded_uuid, + sha256hash, + truncate_tokens, +) +from data_safe_haven.infrastructure.common import ( + get_id_from_rg, + get_id_from_subnet, + get_name_from_rg, +) +from data_safe_haven.infrastructure.components import ( + BlobContainerAcl, + BlobContainerAclProps, + SSLCertificate, + SSLCertificateProps, +) +from data_safe_haven.resources import resources_path +from data_safe_haven.types import AzureDnsZoneNames + + +class SREDataProps: + """Properties for SREDataComponent""" + + def __init__( + self, + admin_email_address: Input[str], + admin_group_id: Input[str], + admin_ip_addresses: Input[Sequence[str]], + data_provider_ip_addresses: Input[Sequence[str]], + dns_private_zones: Input[dict[str, network.PrivateZone]], + dns_record: Input[network.RecordSet], + dns_server_admin_password: Input[pulumi_random.RandomPassword], + location: Input[str], + resource_group: Input[resources.ResourceGroup], + sre_fqdn: Input[str], + subnet_data_configuration: Input[network.GetSubnetResult], + subnet_data_desired_state: Input[network.GetSubnetResult], + subnet_data_private: Input[network.GetSubnetResult], + subscription_id: Input[str], + subscription_name: Input[str], + tenant_id: Input[str], + ) -> None: + self.admin_email_address = admin_email_address + self.admin_group_id = admin_group_id + self.data_configuration_ip_addresses = admin_ip_addresses + self.data_private_sensitive_ip_addresses = Output.all( + admin_ip_addresses, data_provider_ip_addresses + ).apply( + lambda address_lists: { + ip for address_list in address_lists for ip in address_list + } + ) + self.dns_private_zones = dns_private_zones + self.dns_record = dns_record + self.password_dns_server_admin = dns_server_admin_password + self.location = location + self.resource_group_id = Output.from_input(resource_group).apply(get_id_from_rg) + self.resource_group_name = Output.from_input(resource_group).apply( + get_name_from_rg + ) + self.sre_fqdn = sre_fqdn + self.subnet_data_configuration_id = Output.from_input( + subnet_data_configuration + ).apply(get_id_from_subnet) + self.subnet_data_desired_state_id = Output.from_input( + subnet_data_desired_state + ).apply(get_id_from_subnet) + self.subnet_data_private_id = Output.from_input(subnet_data_private).apply( + get_id_from_subnet + ) + self.subscription_id = subscription_id + self.subscription_name = subscription_name + self.tenant_id = tenant_id + + +class SREDataComponent(ComponentResource): + """Deploy SRE data with Pulumi""" + + azure_role_ids: ClassVar[dict[str, str]] = { + "Storage Blob Data Owner": "b7e6dc6d-f1e8-4753-8033-0f276bb0955b" + } + + def __init__( + self, + name: str, + stack_name: str, + props: SREDataProps, + opts: ResourceOptions | None = None, + tags: Input[Mapping[str, Input[str]]] | None = None, + ) -> None: + super().__init__("dsh:sre:DataComponent", name, {}, opts) + child_opts = ResourceOptions.merge(opts, ResourceOptions(parent=self)) + child_tags = {"component": "data"} | (tags if tags else {}) + + # Define Key Vault reader + identity_key_vault_reader = managedidentity.UserAssignedIdentity( + f"{self._name}_id_key_vault_reader", + location=props.location, + resource_group_name=props.resource_group_name, + resource_name_=f"{stack_name}-id-key-vault-reader", + opts=child_opts, + 
tags=child_tags, + ) + + # Define SRE KeyVault + key_vault = keyvault.Vault( + f"{self._name}_kv_secrets", + location=props.location, + properties=keyvault.VaultPropertiesArgs( + access_policies=[ + keyvault.AccessPolicyEntryArgs( + object_id=props.admin_group_id, + permissions=keyvault.PermissionsArgs( + certificates=[ + "get", + "list", + "delete", + "create", + "import", + "update", + "managecontacts", + "getissuers", + "listissuers", + "setissuers", + "deleteissuers", + "manageissuers", + "recover", + "purge", + ], + keys=[ + "encrypt", + "decrypt", + "sign", + "verify", + "get", + "list", + "create", + "update", + "import", + "delete", + "backup", + "restore", + "recover", + "purge", + ], + secrets=[ + "get", + "list", + "set", + "delete", + "backup", + "restore", + "recover", + "purge", + ], + ), + tenant_id=props.tenant_id, + ), + keyvault.AccessPolicyEntryArgs( + object_id=identity_key_vault_reader.principal_id, + permissions=keyvault.PermissionsArgs( + certificates=[ + "get", + "list", + ], + keys=[ + "get", + "list", + ], + secrets=[ + "get", + "list", + ], + ), + tenant_id=props.tenant_id, + ), + ], + enabled_for_deployment=True, + enabled_for_disk_encryption=True, + enabled_for_template_deployment=True, + sku=keyvault.SkuArgs( + family="A", + name=keyvault.SkuName.STANDARD, + ), + soft_delete_retention_in_days=7, # minimum allowed + tenant_id=props.tenant_id, + ), + resource_group_name=props.resource_group_name, + vault_name=get_key_vault_name(stack_name)[:24], # maximum of 24 characters + opts=child_opts, + tags=child_tags, + ) + + # Define SSL certificate for this FQDN + sre_fqdn_certificate = SSLCertificate( + f"{self._name}_kvc_https_certificate", + SSLCertificateProps( + certificate_secret_name=Output.from_input(props.sre_fqdn).apply( + lambda s: replace_separators(s, "-") + ), + domain_name=props.sre_fqdn, + admin_email_address=props.admin_email_address, + key_vault_name=key_vault.name, + networking_resource_group_name=props.resource_group_name, + subscription_name=props.subscription_name, + ), + opts=ResourceOptions.merge( + child_opts, + ResourceOptions( + depends_on=[props.dns_record], + parent=key_vault, + ), # we need the delegation NS record to exist before generating the certificate + ), + ) + + # Secret: database service admin password + password_database_service_admin = pulumi_random.RandomPassword( + f"{self._name}_password_database_service_admin", + length=20, + special=True, + opts=ResourceOptions.merge(child_opts, ResourceOptions(parent=key_vault)), + ) + keyvault.Secret( + f"{self._name}_kvs_password_database_service_admin", + properties=keyvault.SecretPropertiesArgs( + value=password_database_service_admin.result, + ), + resource_group_name=props.resource_group_name, + secret_name="password-database-service-admin", + vault_name=key_vault.name, + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=password_database_service_admin) + ), + tags=child_tags, + ) + + # Secret: DNS server admin password + keyvault.Secret( + f"{self._name}_kvs_password_dns_server_admin", + properties=keyvault.SecretPropertiesArgs( + value=props.password_dns_server_admin.result, + ), + resource_group_name=props.resource_group_name, + secret_name="password-dns-server-admin", + vault_name=key_vault.name, + opts=ResourceOptions.merge(child_opts, ResourceOptions(parent=key_vault)), + tags=child_tags, + ) + + # Secret: Gitea database admin password + password_gitea_database_admin = pulumi_random.RandomPassword( + f"{self._name}_password_gitea_database_admin", + length=20, + 
special=True, + opts=ResourceOptions.merge(child_opts, ResourceOptions(parent=key_vault)), + ) + keyvault.Secret( + f"{self._name}_kvs_password_gitea_database_admin", + properties=keyvault.SecretPropertiesArgs( + value=password_gitea_database_admin.result + ), + resource_group_name=props.resource_group_name, + secret_name="password-gitea-database-admin", + vault_name=key_vault.name, + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=password_gitea_database_admin) + ), + tags=child_tags, + ) + + # Secret: Hedgedoc database admin password + password_hedgedoc_database_admin = pulumi_random.RandomPassword( + f"{self._name}_password_hedgedoc_database_admin", + length=20, + special=True, + opts=ResourceOptions.merge(child_opts, ResourceOptions(parent=key_vault)), + ) + keyvault.Secret( + f"{self._name}_kvs_password_hedgedoc_database_admin", + properties=keyvault.SecretPropertiesArgs( + value=password_hedgedoc_database_admin.result + ), + resource_group_name=props.resource_group_name, + secret_name="password-hedgedoc-database-admin", + vault_name=key_vault.name, + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=password_hedgedoc_database_admin) + ), + tags=child_tags, + ) + + # Secret: Nexus admin password + password_nexus_admin = pulumi_random.RandomPassword( + f"{self._name}_password_nexus_admin", + length=20, + special=True, + opts=ResourceOptions.merge(child_opts, ResourceOptions(parent=key_vault)), + ) + keyvault.Secret( + f"{self._name}_kvs_password_nexus_admin", + properties=keyvault.SecretPropertiesArgs(value=password_nexus_admin.result), + resource_group_name=props.resource_group_name, + secret_name="password-nexus-admin", + vault_name=key_vault.name, + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=password_nexus_admin) + ), + tags=child_tags, + ) + + # Secret: Guacamole user database admin password + password_user_database_admin = pulumi_random.RandomPassword( + f"{self._name}_password_user_database_admin", + length=20, + special=True, + opts=ResourceOptions.merge(child_opts, ResourceOptions(parent=key_vault)), + ) + kvs_password_user_database_admin = keyvault.Secret( + f"{self._name}_kvs_password_user_database_admin", + properties=keyvault.SecretPropertiesArgs( + value=password_user_database_admin.result + ), + resource_group_name=props.resource_group_name, + secret_name="password-user-database-admin", + vault_name=key_vault.name, + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=password_user_database_admin) + ), + tags=child_tags, + ) + + # Secret: Workspace admin password + password_workspace_admin = pulumi_random.RandomPassword( + f"{self._name}_password_workspace_admin", + length=20, + special=True, + opts=ResourceOptions.merge(child_opts, ResourceOptions(parent=key_vault)), + ) + keyvault.Secret( + f"{self._name}_kvs_password_workspace_admin", + properties=keyvault.SecretPropertiesArgs( + value=password_workspace_admin.result + ), + resource_group_name=props.resource_group_name, + secret_name="password-workspace-admin", + vault_name=key_vault.name, + opts=ResourceOptions(parent=password_workspace_admin), + tags=child_tags, + ) + + # Deploy configuration data storage account + # - This holds file shares that are mounted by Azure Container Instances + storage_account_data_configuration = storage.StorageAccount( + f"{self._name}_storage_account_data_configuration", + # Note that account names have a maximum of 24 characters + account_name=alphanumeric( + f"{''.join(truncate_tokens(stack_name.split('-'), 
14))}configdata" + )[:24], + kind=storage.Kind.STORAGE_V2, + location=props.location, + network_rule_set=storage.NetworkRuleSetArgs( + bypass=storage.Bypass.AZURE_SERVICES, + default_action=storage.DefaultAction.DENY, + ip_rules=Output.from_input(props.data_configuration_ip_addresses).apply( + lambda ip_ranges: [ + storage.IPRuleArgs( + action=storage.Action.ALLOW, + i_p_address_or_range=str(ip_address), + ) + for ip_range in sorted(ip_ranges) + for ip_address in AzureIPv4Range.from_cidr(ip_range).all_ips() + ] + ), + virtual_network_rules=[ + storage.VirtualNetworkRuleArgs( + virtual_network_resource_id=props.subnet_data_configuration_id, + ) + ], + ), + resource_group_name=props.resource_group_name, + sku=storage.SkuArgs(name=storage.SkuName.STANDARD_GRS), + opts=child_opts, + tags=child_tags, + ) + # Retrieve configuration data storage account keys + storage_account_data_configuration_keys = Output.all( + account_name=storage_account_data_configuration.name, + resource_group_name=props.resource_group_name, + ).apply( + lambda kwargs: storage.list_storage_account_keys( + account_name=kwargs["account_name"], + resource_group_name=kwargs["resource_group_name"], + ) + ) + # Set up a private endpoint for the configuration data storage account + storage_account_data_configuration_private_endpoint = network.PrivateEndpoint( + f"{storage_account_data_configuration._name}_private_endpoint", + location=props.location, + private_endpoint_name=f"{stack_name}-pep-storage-account-data-configuration", + private_link_service_connections=[ + network.PrivateLinkServiceConnectionArgs( + group_ids=["file"], + name=f"{stack_name}-cnxn-pep-storage-account-data-configuration", + private_link_service_id=storage_account_data_configuration.id, + ) + ], + resource_group_name=props.resource_group_name, + subnet=network.SubnetArgs(id=props.subnet_data_configuration_id), + opts=ResourceOptions.merge( + child_opts, + ResourceOptions( + ignore_changes=["custom_dns_configs"], + parent=storage_account_data_configuration, + ), + ), + tags=child_tags, + ) + # Add a private DNS record for each configuration data endpoint custom DNS config + network.PrivateDnsZoneGroup( + f"{storage_account_data_configuration._name}_private_dns_zone_group", + private_dns_zone_configs=[ + network.PrivateDnsZoneConfigArgs( + name=replace_separators( + f"{stack_name}-storage-account-data-configuration-to-{dns_zone_name}", + "-", + ), + private_dns_zone_id=props.dns_private_zones[dns_zone_name].id, + ) + for dns_zone_name in AzureDnsZoneNames.STORAGE_ACCOUNT + ], + private_dns_zone_group_name=f"{stack_name}-dzg-storage-account-data-configuration", + private_endpoint_name=storage_account_data_configuration_private_endpoint.name, + resource_group_name=props.resource_group_name, + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=storage_account_data_configuration) + ), + ) + + # Deploy desired state storage account + # - This holds the /desired_state container that is mounted by workspaces + # - Azure blobs have worse NFS support but can be accessed with Azure Storage Explorer + storage_account_data_desired_state = storage.StorageAccount( + f"{self._name}_storage_account_data_desired_state", + # Storage account names have a maximum of 24 characters + account_name=alphanumeric( + f"{''.join(truncate_tokens(stack_name.split('-'), 11))}desiredstate{sha256hash(self._name)}" + )[:24], + enable_https_traffic_only=True, + enable_nfs_v3=True, + encryption=storage.EncryptionArgs( + key_source=storage.KeySource.MICROSOFT_STORAGE, + 
services=storage.EncryptionServicesArgs( + blob=storage.EncryptionServiceArgs( + enabled=True, key_type=storage.KeyType.ACCOUNT + ), + file=storage.EncryptionServiceArgs( + enabled=True, key_type=storage.KeyType.ACCOUNT + ), + ), + ), + kind=storage.Kind.BLOCK_BLOB_STORAGE, + is_hns_enabled=True, + location=props.location, + network_rule_set=storage.NetworkRuleSetArgs( + bypass=storage.Bypass.AZURE_SERVICES, + default_action=storage.DefaultAction.DENY, + ip_rules=Output.from_input(props.data_configuration_ip_addresses).apply( + lambda ip_ranges: [ + storage.IPRuleArgs( + action=storage.Action.ALLOW, + i_p_address_or_range=str(ip_address), + ) + for ip_range in sorted(ip_ranges) + for ip_address in AzureIPv4Range.from_cidr(ip_range).all_ips() + ] + ), + virtual_network_rules=[ + storage.VirtualNetworkRuleArgs( + virtual_network_resource_id=props.subnet_data_desired_state_id, + ) + ], + ), + resource_group_name=props.resource_group_name, + sku=storage.SkuArgs(name=storage.SkuName.PREMIUM_ZRS), + opts=child_opts, + tags=child_tags, + ) + # Deploy desired state share + container_desired_state = storage.BlobContainer( + f"{self._name}_blob_desired_state", + account_name=storage_account_data_desired_state.name, + container_name="desiredstate", + default_encryption_scope="$account-encryption-key", + deny_encryption_scope_override=False, + public_access=storage.PublicAccess.NONE, + resource_group_name=props.resource_group_name, + opts=ResourceOptions.merge( + child_opts, + ResourceOptions(parent=storage_account_data_desired_state), + ), + ) + # Set storage container ACLs + BlobContainerAcl( + f"{container_desired_state._name}_acl", + BlobContainerAclProps( + acl_user="r-x", + acl_group="r-x", + acl_other="r-x", + # ensure that the above permissions are also set on any newly created + # files (eg. 
with Azure Storage Explorer) + apply_default_permissions=True, + container_name=container_desired_state.name, + resource_group_name=props.resource_group_name, + storage_account_name=storage_account_data_desired_state.name, + subscription_name=props.subscription_name, + ), + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=container_desired_state) + ), + ) + # Create file assets to upload + desired_state_directory = (resources_path / "workspace" / "ansible").absolute() + files_desired_state = [ + ( + FileAsset(str(file_path)), + file_path.name, + str(file_path.relative_to(desired_state_directory)), + ) + for file_path in sorted(desired_state_directory.rglob("*")) + if file_path.is_file() and not file_path.name.startswith(".") + ] + # Upload file assets to desired state container + for file_asset, file_name, file_path in files_desired_state: + storage.Blob( + f"{container_desired_state._name}_blob_{file_name}", + account_name=storage_account_data_desired_state.name, + blob_name=file_path, + container_name=container_desired_state.name, + resource_group_name=props.resource_group_name, + source=file_asset, + ) + # Set up a private endpoint for the desired state storage account + storage_account_data_desired_state_endpoint = network.PrivateEndpoint( + f"{storage_account_data_desired_state._name}_private_endpoint", + location=props.location, + private_endpoint_name=f"{stack_name}-pep-storage-account-data-desired-state", + private_link_service_connections=[ + network.PrivateLinkServiceConnectionArgs( + group_ids=["blob"], + name=f"{stack_name}-cnxn-pep-storage-account-data-private-sensitive", + private_link_service_id=storage_account_data_desired_state.id, + ) + ], + resource_group_name=props.resource_group_name, + subnet=network.SubnetArgs(id=props.subnet_data_desired_state_id), + opts=ResourceOptions.merge( + child_opts, + ResourceOptions( + ignore_changes=["custom_dns_configs"], + parent=storage_account_data_desired_state, + ), + ), + tags=child_tags, + ) + # Add a private DNS record for each desired state endpoint custom DNS config + network.PrivateDnsZoneGroup( + f"{storage_account_data_desired_state._name}_private_dns_zone_group", + private_dns_zone_configs=[ + network.PrivateDnsZoneConfigArgs( + name=replace_separators( + f"{stack_name}-storage-account-data-desired-state-to-{dns_zone_name}", + "-", + ), + private_dns_zone_id=props.dns_private_zones[dns_zone_name].id, + ) + for dns_zone_name in AzureDnsZoneNames.STORAGE_ACCOUNT + ], + private_dns_zone_group_name=f"{stack_name}-dzg-storage-account-data-desired-state", + private_endpoint_name=storage_account_data_desired_state_endpoint.name, + resource_group_name=props.resource_group_name, + opts=ResourceOptions.merge( + child_opts, + ResourceOptions(parent=storage_account_data_desired_state), + ), + ) + + # Deploy sensitive data blob storage account + # - This holds the /data and /output containers that are mounted by workspaces + # - Azure blobs have worse NFS support but can be accessed with Azure Storage Explorer + storage_account_data_private_sensitive = storage.StorageAccount( + f"{self._name}_storage_account_data_private_sensitive", + # Storage account names have a maximum of 24 characters + account_name=alphanumeric( + f"{''.join(truncate_tokens(stack_name.split('-'), 11))}sensitivedata{sha256hash(self._name)}" + )[:24], + enable_https_traffic_only=True, + enable_nfs_v3=True, + encryption=storage.EncryptionArgs( + key_source=storage.KeySource.MICROSOFT_STORAGE, + services=storage.EncryptionServicesArgs( + 
blob=storage.EncryptionServiceArgs( + enabled=True, key_type=storage.KeyType.ACCOUNT + ), + file=storage.EncryptionServiceArgs( + enabled=True, key_type=storage.KeyType.ACCOUNT + ), + ), + ), + kind=storage.Kind.BLOCK_BLOB_STORAGE, + is_hns_enabled=True, + location=props.location, + network_rule_set=storage.NetworkRuleSetArgs( + bypass=storage.Bypass.AZURE_SERVICES, + default_action=storage.DefaultAction.DENY, + ip_rules=Output.from_input( + props.data_private_sensitive_ip_addresses + ).apply( + lambda ip_ranges: [ + storage.IPRuleArgs( + action=storage.Action.ALLOW, + i_p_address_or_range=str(ip_address), + ) + for ip_range in sorted(ip_ranges) + for ip_address in AzureIPv4Range.from_cidr(ip_range).all_ips() + ] + ), + virtual_network_rules=[ + storage.VirtualNetworkRuleArgs( + virtual_network_resource_id=props.subnet_data_private_id, + ) + ], + ), + resource_group_name=props.resource_group_name, + sku=storage.SkuArgs(name=storage.SkuName.PREMIUM_ZRS), + opts=child_opts, + tags=child_tags, + ) + # Deploy storage containers + storage_container_egress = storage.BlobContainer( + f"{self._name}_blob_egress", + account_name=storage_account_data_private_sensitive.name, + container_name="egress", + default_encryption_scope="$account-encryption-key", + deny_encryption_scope_override=False, + public_access=storage.PublicAccess.NONE, + resource_group_name=props.resource_group_name, + opts=ResourceOptions.merge( + child_opts, + ResourceOptions(parent=storage_account_data_private_sensitive), + ), + ) + storage_container_ingress = storage.BlobContainer( + f"{self._name}_blob_ingress", + account_name=storage_account_data_private_sensitive.name, + container_name="ingress", + default_encryption_scope="$account-encryption-key", + deny_encryption_scope_override=False, + public_access=storage.PublicAccess.NONE, + resource_group_name=props.resource_group_name, + opts=ResourceOptions.merge( + child_opts, + ResourceOptions(parent=storage_account_data_private_sensitive), + ), + ) + # Set storage container ACLs + BlobContainerAcl( + f"{storage_container_egress._name}_acl", + BlobContainerAclProps( + acl_user="rwx", + acl_group="rwx", + acl_other="rwx", + # due to an Azure bug `apply_default_permissions=True` also gives user + # 65533 ownership of the fileshare (preventing use inside the SRE) + apply_default_permissions=False, + container_name=storage_container_egress.name, + resource_group_name=props.resource_group_name, + storage_account_name=storage_account_data_private_sensitive.name, + subscription_name=props.subscription_name, + ), + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=storage_container_egress) + ), + ) + BlobContainerAcl( + f"{storage_container_ingress._name}_acl", + BlobContainerAclProps( + acl_user="rwx", + acl_group="r-x", + acl_other="r-x", + # ensure that the above permissions are also set on any newly created + # files (eg. 
with Azure Storage Explorer) + apply_default_permissions=True, + container_name=storage_container_ingress.name, + resource_group_name=props.resource_group_name, + storage_account_name=storage_account_data_private_sensitive.name, + subscription_name=props.subscription_name, + ), + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=storage_container_ingress) + ), + ) + # Set up a private endpoint for the sensitive data storage account + storage_account_data_private_sensitive_endpoint = network.PrivateEndpoint( + f"{storage_account_data_private_sensitive._name}_private_endpoint", + location=props.location, + private_endpoint_name=f"{stack_name}-pep-storage-account-data-private-sensitive", + private_link_service_connections=[ + network.PrivateLinkServiceConnectionArgs( + group_ids=["blob"], + name=f"{stack_name}-cnxn-pep-storage-account-data-private-sensitive", + private_link_service_id=storage_account_data_private_sensitive.id, + ) + ], + resource_group_name=props.resource_group_name, + subnet=network.SubnetArgs(id=props.subnet_data_private_id), + opts=ResourceOptions.merge( + child_opts, + ResourceOptions( + ignore_changes=["custom_dns_configs"], + parent=storage_account_data_private_sensitive, + ), + ), + tags=child_tags, + ) + # Add a private DNS record for each sensitive data endpoint custom DNS config + network.PrivateDnsZoneGroup( + f"{storage_account_data_private_sensitive._name}_private_dns_zone_group", + private_dns_zone_configs=[ + network.PrivateDnsZoneConfigArgs( + name=replace_separators( + f"{stack_name}-storage-account-data-private-sensitive-to-{dns_zone_name}", + "-", + ), + private_dns_zone_id=props.dns_private_zones[dns_zone_name].id, + ) + for dns_zone_name in AzureDnsZoneNames.STORAGE_ACCOUNT + ], + private_dns_zone_group_name=f"{stack_name}-dzg-storage-account-data-private-sensitive", + private_endpoint_name=storage_account_data_private_sensitive_endpoint.name, + resource_group_name=props.resource_group_name, + opts=ResourceOptions.merge( + child_opts, + ResourceOptions(parent=storage_account_data_private_sensitive), + ), + ) + + # Give the "Storage Blob Data Owner" role to the Azure admin group + # for the data resource group + authorization.RoleAssignment( + f"{self._name}_data_owner_role_assignment", + principal_id=props.admin_group_id, + principal_type=authorization.PrincipalType.GROUP, + role_assignment_name=str( + seeded_uuid(f"{stack_name} Storage Blob Data Owner") + ), + role_definition_id=Output.concat( + "/subscriptions/", + props.subscription_id, + "/providers/Microsoft.Authorization/roleDefinitions/", + self.azure_role_ids["Storage Blob Data Owner"], + ), + scope=props.resource_group_id, + opts=child_opts, + ) + + # Deploy data_private_user files storage account + # - This holds the /home and /shared containers that are mounted by workspaces + # - Azure Files has better NFS support but cannot be accessed with Azure Storage Explorer + # - Allows root-squashing to be configured + storage_account_data_private_user = storage.StorageAccount( + f"{self._name}_storage_account_data_private_user", + access_tier=storage.AccessTier.COOL, + # Storage account names have a maximum of 24 characters + account_name=alphanumeric( + f"{''.join(truncate_tokens(stack_name.split('-'), 16))}userdata{sha256hash(self._name)}" + )[:24], + enable_https_traffic_only=False, + encryption=storage.EncryptionArgs( + key_source=storage.KeySource.MICROSOFT_STORAGE, + services=storage.EncryptionServicesArgs( + file=storage.EncryptionServiceArgs( + enabled=True, 
key_type=storage.KeyType.ACCOUNT + ), + ), + ), + kind=storage.Kind.FILE_STORAGE, + location=props.location, + network_rule_set=storage.NetworkRuleSetArgs( + bypass=storage.Bypass.AZURE_SERVICES, + default_action=storage.DefaultAction.DENY, + virtual_network_rules=[ + storage.VirtualNetworkRuleArgs( + virtual_network_resource_id=props.subnet_data_private_id, + ) + ], + ), + resource_group_name=props.resource_group_name, + sku=storage.SkuArgs(name=storage.SkuName.PREMIUM_ZRS), + opts=child_opts, + tags=child_tags, + ) + storage.FileShare( + f"{storage_account_data_private_user._name}_files_home", + access_tier=storage.ShareAccessTier.PREMIUM, + account_name=storage_account_data_private_user.name, + enabled_protocols=storage.EnabledProtocols.NFS, + resource_group_name=props.resource_group_name, + # Squashing prevents root from creating user home directories + root_squash=storage.RootSquashType.NO_ROOT_SQUASH, + share_name="home", + share_quota=1024, + signed_identifiers=[], + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=storage_account_data_private_user) + ), + ) + storage.FileShare( + f"{storage_account_data_private_user._name}_files_shared", + access_tier=storage.ShareAccessTier.PREMIUM, + account_name=storage_account_data_private_user.name, + enabled_protocols=storage.EnabledProtocols.NFS, + resource_group_name=props.resource_group_name, + root_squash=storage.RootSquashType.ROOT_SQUASH, + share_name="shared", + share_quota=1024, + signed_identifiers=[], + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=storage_account_data_private_user) + ), + ) + # Set up a private endpoint for the user data storage account + storage_account_data_private_user_endpoint = network.PrivateEndpoint( + f"{storage_account_data_private_user._name}_private_endpoint", + location=props.location, + private_endpoint_name=f"{stack_name}-pep-storage-account-data-private-user", + private_link_service_connections=[ + network.PrivateLinkServiceConnectionArgs( + group_ids=["file"], + name=f"{stack_name}-cnxn-pep-storage-account-data-private-user", + private_link_service_id=storage_account_data_private_user.id, + ) + ], + resource_group_name=props.resource_group_name, + subnet=network.SubnetArgs(id=props.subnet_data_private_id), + opts=ResourceOptions.merge( + child_opts, + ResourceOptions( + ignore_changes=["custom_dns_configs"], + parent=storage_account_data_private_user, + ), + ), + tags=child_tags, + ) + # Add a private DNS record for each user data endpoint custom DNS config + network.PrivateDnsZoneGroup( + f"{storage_account_data_private_user._name}_private_dns_zone_group", + private_dns_zone_configs=[ + network.PrivateDnsZoneConfigArgs( + name=replace_separators( + f"{stack_name}-storage-account-data-private-user-to-{dns_zone_name}", + "-", + ), + private_dns_zone_id=props.dns_private_zones[dns_zone_name].id, + ) + for dns_zone_name in AzureDnsZoneNames.STORAGE_ACCOUNT + ], + private_dns_zone_group_name=f"{stack_name}-dzg-storage-account-data-private-user", + private_endpoint_name=storage_account_data_private_user_endpoint.name, + resource_group_name=props.resource_group_name, + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=storage_account_data_private_user) + ), + ) + + # Register outputs + self.sre_fqdn_certificate_secret_id = sre_fqdn_certificate.secret_id + self.storage_account_data_private_user_name = ( + storage_account_data_private_user.name + ) + self.storage_account_data_private_sensitive_id = ( + storage_account_data_private_sensitive.id + ) + 
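+        # The attributes registered below are consumed by other SRE components in
+        # this Pulumi program. Sensitive values (storage account keys and generated
+        # passwords) are wrapped in Output.secret, which marks them so that Pulumi
+        # masks them in CLI output and encrypts them in the stack state; the secret
+        # flag also propagates through any later .apply() transformation.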
self.storage_account_data_private_sensitive_name = ( + storage_account_data_private_sensitive.name + ) + self.storage_account_data_configuration_key = Output.secret( + storage_account_data_configuration_keys.apply( + lambda keys: keys.keys[0].value + ) + ) + self.storage_account_data_configuration_name = ( + storage_account_data_configuration.name + ) + self.storage_account_data_desired_state_name = ( + storage_account_data_desired_state.name + ) + self.managed_identity = identity_key_vault_reader + self.password_nexus_admin = Output.secret(password_nexus_admin.result) + self.password_database_service_admin = Output.secret( + password_database_service_admin.result + ) + self.password_dns_server_admin = Output.secret( + Output.from_input(props.password_dns_server_admin).apply( + lambda password: password.result + ) + ) + self.password_gitea_database_admin = Output.secret( + password_gitea_database_admin.result + ) + self.password_hedgedoc_database_admin = Output.secret( + password_hedgedoc_database_admin.result + ) + self.password_user_database_admin = Output.secret( + password_user_database_admin.result + ) + self.password_workspace_admin = Output.secret(password_workspace_admin.result) + + # Register exports + self.exports = { + "key_vault_name": key_vault.name, + "password_user_database_admin_secret": kvs_password_user_database_admin.name, + } diff --git a/data_safe_haven/infrastructure/programs/sre/database_servers.py b/data_safe_haven/infrastructure/programs/sre/database_servers.py new file mode 100644 index 0000000000..64dfe0062e --- /dev/null +++ b/data_safe_haven/infrastructure/programs/sre/database_servers.py @@ -0,0 +1,111 @@ +from collections.abc import Mapping + +from pulumi import ComponentResource, Input, ResourceOptions + +from data_safe_haven.infrastructure.components import ( + LocalDnsRecordComponent, + LocalDnsRecordProps, + MicrosoftSQLDatabaseComponent, + MicrosoftSQLDatabaseProps, + PostgresqlDatabaseComponent, + PostgresqlDatabaseProps, +) +from data_safe_haven.types import DatabaseSystem + + +class SREDatabaseServerProps: + """Properties for SREDatabaseServerComponent""" + + def __init__( + self, + database_password: Input[str], + database_system: DatabaseSystem, # this must *not* be passed as an Input[T] + location: Input[str], + resource_group_name: Input[str], + sre_fqdn: Input[str], + subnet_id: Input[str], + ) -> None: + self.database_password = database_password + self.database_system = database_system + self.database_username = "databaseadmin" + self.location = location + self.resource_group_name = resource_group_name + self.sre_fqdn = sre_fqdn + self.subnet_id = subnet_id + + +class SREDatabaseServerComponent(ComponentResource): + """Deploy database server with Pulumi""" + + def __init__( + self, + name: str, + stack_name: str, + props: SREDatabaseServerProps, + opts: ResourceOptions | None = None, + tags: Input[Mapping[str, Input[str]]] | None = None, + ) -> None: + super().__init__("dsh:sre:DatabaseServerComponent", name, {}, opts) + child_opts = ResourceOptions.merge(opts, ResourceOptions(parent=self)) + child_tags = {"component": "database server"} | (tags if tags else {}) + + if props.database_system == DatabaseSystem.MICROSOFT_SQL_SERVER: + # Define a Microsoft SQL server and default database + db_server_mssql = MicrosoftSQLDatabaseComponent( + f"{self._name}_db_mssql", + MicrosoftSQLDatabaseProps( + database_names=[], + database_password=props.database_password, + database_resource_group_name=props.resource_group_name, + 
database_server_name=f"{stack_name}-db-server-mssql", + database_subnet_id=props.subnet_id, + database_username=props.database_username, + location=props.location, + ), + opts=child_opts, + tags=child_tags, + ) + # Register the database in the SRE DNS zone + LocalDnsRecordComponent( + f"{self._name}_mssql_dns_record_set", + LocalDnsRecordProps( + base_fqdn=props.sre_fqdn, + private_ip_address=db_server_mssql.private_ip_address, + record_name="mssql", + resource_group_name=props.resource_group_name, + ), + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=db_server_mssql) + ), + ) + + if props.database_system == DatabaseSystem.POSTGRESQL: + # Define a PostgreSQL server and default database + db_server_postgresql = PostgresqlDatabaseComponent( + f"{self._name}_db_postgresql", + PostgresqlDatabaseProps( + database_names=[], + database_password=props.database_password, + database_resource_group_name=props.resource_group_name, + database_server_name=f"{stack_name}-db-server-postgresql", + database_subnet_id=props.subnet_id, + database_username=props.database_username, + disable_secure_transport=True, + location=props.location, + ), + opts=child_opts, + tags=child_tags, + ) + # Register the database in the SRE DNS zone + LocalDnsRecordComponent( + f"{self._name}_postgresql_dns_record_set", + LocalDnsRecordProps( + base_fqdn=props.sre_fqdn, + private_ip_address=db_server_postgresql.private_ip_address, + record_name="postgresql", + resource_group_name=props.resource_group_name, + ), + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=db_server_postgresql) + ), + ) diff --git a/data_safe_haven/infrastructure/programs/sre/dns_server.py b/data_safe_haven/infrastructure/programs/sre/dns_server.py new file mode 100644 index 0000000000..df85e09d83 --- /dev/null +++ b/data_safe_haven/infrastructure/programs/sre/dns_server.py @@ -0,0 +1,323 @@ +"""Pulumi component for SRE DNS server""" + +from collections.abc import Mapping + +import pulumi_random +from pulumi import ComponentResource, Input, Output, ResourceOptions +from pulumi_azure_native import containerinstance, network + +from data_safe_haven.functions import b64encode, replace_separators +from data_safe_haven.infrastructure.common import ( + DockerHubCredentials, + SREDnsIpRanges, + SREIpRanges, + get_ip_address_from_container_group, +) +from data_safe_haven.resources import resources_path +from data_safe_haven.types import ( + AzureDnsZoneNames, + NetworkingPriorities, + PermittedDomains, + Ports, +) +from data_safe_haven.utility import FileReader + + +class SREDnsServerProps: + """Properties for SREDnsServerComponent""" + + def __init__( + self, + dockerhub_credentials: DockerHubCredentials, + location: Input[str], + resource_group_name: Input[str], + shm_fqdn: Input[str], + ) -> None: + self.admin_username = "dshadmin" + self.dockerhub_credentials = dockerhub_credentials + self.location = location + self.resource_group_name = resource_group_name + self.shm_fqdn = shm_fqdn + + +class SREDnsServerComponent(ComponentResource): + """Deploy DNS server with Pulumi""" + + def __init__( + self, + name: str, + stack_name: str, + props: SREDnsServerProps, + opts: ResourceOptions | None = None, + tags: Input[Mapping[str, Input[str]]] | None = None, + ) -> None: + super().__init__("dsh:sre:DnsServerComponent", name, {}, opts) + child_opts = ResourceOptions.merge(opts, ResourceOptions(parent=self)) + child_tags = {"component": "DNS server"} | (tags if tags else {}) + + # Generate admin password + password_admin = 
pulumi_random.RandomPassword( + f"{self._name}_password_admin", length=20, special=True, opts=child_opts + ) + + # Read AdGuardHome setup files + adguard_entrypoint_sh_reader = FileReader( + resources_path / "dns_server" / "entrypoint.sh" + ) + adguard_adguardhome_yaml_reader = FileReader( + resources_path / "dns_server" / "AdGuardHome.mustache.yaml" + ) + + # Expand AdGuardHome YAML configuration + adguard_adguardhome_yaml_contents = Output.all( + admin_username=props.admin_username, + # Only the first 72 bytes of the generated random string will be used but a + # 20 character UTF-8 string (alphanumeric + special) will not exceed that. + admin_password_encrypted=password_admin.bcrypt_hash, + # Use Azure virtual DNS server as upstream + # https://learn.microsoft.com/en-us/azure/virtual-network/what-is-ip-address-168-63-129-16 + # This server is aware of private DNS zones + upstream_dns="168.63.129.16", + filter_allow=Output.from_input(props.shm_fqdn).apply( + lambda fqdn: [ + f"*.{fqdn}", + *PermittedDomains.ALL, + ] + ), + ).apply( + lambda mustache_values: adguard_adguardhome_yaml_reader.file_contents( + mustache_values + ) + ) + + # Define network security group + nsg = network.NetworkSecurityGroup( + f"{self._name}_nsg_dns", + location=props.location, + network_security_group_name=f"{stack_name}-nsg-dns", + resource_group_name=props.resource_group_name, + security_rules=[ + # Inbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow inbound connections from attached.", + destination_address_prefix=SREDnsIpRanges.vnet.prefix, + destination_port_ranges=[Ports.DNS], + direction=network.SecurityRuleDirection.INBOUND, + name="AllowSREInbound", + priority=NetworkingPriorities.INTERNAL_SRE_ANY, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.vnet.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other inbound traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.INBOUND, + name="DenyAllOtherInbound", + priority=NetworkingPriorities.ALL_OTHER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + # Outbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound DNS traffic over the internet.", + destination_address_prefix="Internet", + destination_port_ranges=[Ports.DNS], + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowDnsInternetOutbound", + priority=NetworkingPriorities.EXTERNAL_INTERNET, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREDnsIpRanges.vnet.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other outbound traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="DenyAllOtherOutbound", + priority=NetworkingPriorities.ALL_OTHER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + ], + opts=child_opts, + tags=child_tags, + ) + + # Deploy dedicated virtual network + subnet_name = "DnsSubnet" + virtual_network = network.VirtualNetwork( + f"{self._name}_virtual_network", + address_space=network.AddressSpaceArgs( + address_prefixes=[SREDnsIpRanges.vnet.prefix], + ), + location=props.location, + 
resource_group_name=props.resource_group_name, + subnets=[ # Note that we define subnets inline to avoid creation order issues + # DNS subnet + network.SubnetArgs( + address_prefix=SREDnsIpRanges.vnet.prefix, + delegations=[ + network.DelegationArgs( + name="SubnetDelegationContainerGroups", + service_name="Microsoft.ContainerInstance/containerGroups", + type="Microsoft.Network/virtualNetworks/subnets/delegations", + ), + ], + name=subnet_name, + network_security_group=network.NetworkSecurityGroupArgs(id=nsg.id), + route_table=None, + ), + ], + virtual_network_name=f"{stack_name}-vnet-dns", + virtual_network_peerings=[], + opts=ResourceOptions.merge( + child_opts, + ResourceOptions( + ignore_changes=["virtual_network_peerings"] + ), # allow peering to SRE virtual network + ), + tags=child_tags, + ) + + subnet_dns = network.get_subnet_output( + subnet_name=subnet_name, + resource_group_name=props.resource_group_name, + virtual_network_name=virtual_network.name, + ) + + # Define the DNS container group with AdGuard + container_group = containerinstance.ContainerGroup( + f"{self._name}_container_group", + container_group_name=f"{stack_name}-container-group-dns", + containers=[ + containerinstance.ContainerArgs( + image="adguard/adguardhome:v0.107.52", + name="adguard", + # Providing "command" overwrites the CMD arguments in the Docker + # image, so we can either provide them here or set defaults in our + # custom entrypoint. + # + # The entrypoint script will not be executable when mounted so we + # need to explicitly run it with /bin/sh + command=["/bin/sh", "/opt/adguardhome/custom/entrypoint.sh"], + environment_variables=[], + # All Azure Container Instances need to expose port 80 on at least + # one container. In this case, the web interface is on 3000 so we + # are not exposing that to users. 
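+                    # Port 53/UDP below is the DNS listener; port 80/TCP is the
+                    # required exposed port described above.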
+ ports=[ + containerinstance.ContainerPortArgs( + port=53, + protocol=containerinstance.ContainerGroupNetworkProtocol.UDP, + ), + containerinstance.ContainerPortArgs( + port=80, + protocol=containerinstance.ContainerGroupNetworkProtocol.TCP, + ), + ], + resources=containerinstance.ResourceRequirementsArgs( + requests=containerinstance.ResourceRequestsArgs( + cpu=1, + memory_in_gb=1, + ), + ), + volume_mounts=[ + containerinstance.VolumeMountArgs( + mount_path="/opt/adguardhome/custom", + name="adguard-opt-adguardhome-custom", + read_only=True, + ), + ], + ), + ], + # Required due to DockerHub rate-limit: https://docs.docker.com/docker-hub/download-rate-limit/ + image_registry_credentials=[ + { + "password": Output.secret(props.dockerhub_credentials.access_token), + "server": props.dockerhub_credentials.server, + "username": props.dockerhub_credentials.username, + } + ], + ip_address=containerinstance.IpAddressArgs( + ports=[ + containerinstance.PortArgs( + port=80, + protocol=containerinstance.ContainerGroupNetworkProtocol.TCP, + ) + ], + type=containerinstance.ContainerGroupIpAddressType.PRIVATE, + ), + location=props.location, + os_type=containerinstance.OperatingSystemTypes.LINUX, + resource_group_name=props.resource_group_name, + restart_policy=containerinstance.ContainerGroupRestartPolicy.ALWAYS, + sku=containerinstance.ContainerGroupSku.STANDARD, + subnet_ids=[containerinstance.ContainerGroupSubnetIdArgs(id=subnet_dns.id)], + volumes=[ + containerinstance.VolumeArgs( + name="adguard-opt-adguardhome-custom", + secret={ + "entrypoint.sh": b64encode( + adguard_entrypoint_sh_reader.file_contents() + ), + "AdGuardHome.yaml": adguard_adguardhome_yaml_contents.apply( + lambda s: b64encode(s) + ), + }, + ), + ], + opts=ResourceOptions.merge( + child_opts, + ResourceOptions( + delete_before_replace=True, + replace_on_changes=["containers"], + ), + ), + tags=child_tags, + ) + + # Create a private DNS zone for each Azure DNS zone name + self.private_zones = { + dns_zone_name: network.PrivateZone( + replace_separators(f"{self._name}_private_zone_{dns_zone_name}", "_"), + location="Global", + private_zone_name=f"privatelink.{dns_zone_name}", + resource_group_name=props.resource_group_name, + opts=child_opts, + tags=child_tags, + ) + for dns_zone_name in AzureDnsZoneNames.ALL + } + + # Link Azure private DNS zones to virtual network + for dns_zone_name, private_dns_zone in self.private_zones.items(): + network.VirtualNetworkLink( + replace_separators( + f"{self._name}_private_zone_{dns_zone_name}_vnet_dns_link", "_" + ), + location="Global", + private_zone_name=private_dns_zone.name, + registration_enabled=False, + resource_group_name=props.resource_group_name, + virtual_network=network.SubResourceArgs(id=virtual_network.id), + virtual_network_link_name=Output.concat( + "link-to-", virtual_network.name + ), + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=virtual_network) + ), + tags=child_tags, + ) + + # Register outputs + self.ip_address = get_ip_address_from_container_group(container_group) + self.password_admin = password_admin + self.virtual_network = virtual_network diff --git a/data_safe_haven/infrastructure/programs/sre/firewall.py b/data_safe_haven/infrastructure/programs/sre/firewall.py new file mode 100644 index 0000000000..97f7a885b7 --- /dev/null +++ b/data_safe_haven/infrastructure/programs/sre/firewall.py @@ -0,0 +1,355 @@ +"""Pulumi component for SRE traffic routing""" + +from collections.abc import Mapping + +from pulumi import ComponentResource, Input, Output, 
ResourceOptions +from pulumi_azure_native import network + +from data_safe_haven.infrastructure.common import ( + get_address_prefixes_from_subnet, + get_id_from_subnet, +) +from data_safe_haven.types import ( + FirewallPriorities, + ForbiddenDomains, + PermittedDomains, + Ports, +) + + +class SREFirewallProps: + """Properties for SREFirewallComponent""" + + def __init__( + self, + location: Input[str], + resource_group_name: Input[str], + route_table_name: Input[str], + subnet_apt_proxy_server: Input[network.GetSubnetResult], + subnet_clamav_mirror: Input[network.GetSubnetResult], + subnet_firewall: Input[network.GetSubnetResult], + subnet_firewall_management: Input[network.GetSubnetResult], + subnet_guacamole_containers: Input[network.GetSubnetResult], + subnet_identity_containers: Input[network.GetSubnetResult], + subnet_user_services_software_repositories: Input[network.GetSubnetResult], + subnet_workspaces: Input[network.GetSubnetResult], + ) -> None: + self.location = location + self.resource_group_name = resource_group_name + self.route_table_name = route_table_name + self.subnet_apt_proxy_server_prefixes = Output.from_input( + subnet_apt_proxy_server + ).apply(get_address_prefixes_from_subnet) + self.subnet_clamav_mirror_prefixes = Output.from_input( + subnet_clamav_mirror + ).apply(get_address_prefixes_from_subnet) + self.subnet_identity_containers_prefixes = Output.from_input( + subnet_identity_containers + ).apply(get_address_prefixes_from_subnet) + self.subnet_firewall_id = Output.from_input(subnet_firewall).apply( + get_id_from_subnet + ) + self.subnet_firewall_management_id = Output.from_input( + subnet_firewall_management + ).apply(get_id_from_subnet) + self.subnet_guacamole_containers_prefixes = Output.from_input( + subnet_guacamole_containers + ).apply(get_address_prefixes_from_subnet) + self.subnet_user_services_software_repositories_prefixes = Output.from_input( + subnet_user_services_software_repositories + ).apply(get_address_prefixes_from_subnet) + self.subnet_workspaces_prefixes = Output.from_input(subnet_workspaces).apply( + get_address_prefixes_from_subnet + ) + + +class SREFirewallComponent(ComponentResource): + """Deploy an SRE firewall with Pulumi""" + + def __init__( + self, + name: str, + stack_name: str, + props: SREFirewallProps, + opts: ResourceOptions | None = None, + tags: Input[Mapping[str, Input[str]]] | None = None, + ) -> None: + super().__init__("dsh:sre:FirewallComponent", name, {}, opts) + child_opts = ResourceOptions.merge(opts, ResourceOptions(parent=self)) + child_tags = {"component": "firewall"} | (tags if tags else {}) + + # Deploy IP address + public_ip = network.PublicIPAddress( + f"{self._name}_pip_firewall", + location=props.location, + public_ip_address_name=f"{stack_name}-pip-firewall", + public_ip_allocation_method=network.IPAllocationMethod.STATIC, + resource_group_name=props.resource_group_name, + sku=network.PublicIPAddressSkuArgs( + name=network.PublicIPAddressSkuName.STANDARD + ), + opts=child_opts, + tags=child_tags, + ) + + # Note that a Basic SKU firewall needs a separate management IP address and + # subnet to handle traffic for communicating updates and health metrics to and + # from Microsoft. 
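+        # The management public IP created below is attached to the firewall via
+        # its management_ip_configuration argument, together with the dedicated
+        # management subnet passed in through props.subnet_firewall_management_id.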
+ public_ip_management = network.PublicIPAddress( + f"{self._name}_pip_firewall_management", + location=props.location, + public_ip_address_name=f"{stack_name}-pip-firewall-management", + public_ip_allocation_method=network.IPAllocationMethod.STATIC, + resource_group_name=props.resource_group_name, + sku=network.PublicIPAddressSkuArgs( + name=network.PublicIPAddressSkuName.STANDARD + ), + opts=child_opts, + tags=child_tags, + ) + + # Deploy firewall + firewall = network.AzureFirewall( + f"{self._name}_firewall", + application_rule_collections=[ + network.AzureFirewallApplicationRuleCollectionArgs( + action=network.AzureFirewallRCActionArgs( + type=network.AzureFirewallRCActionType.ALLOW + ), + name="apt-proxy-server", + priority=FirewallPriorities.SRE_APT_PROXY_SERVER, + rules=[ + network.AzureFirewallApplicationRuleArgs( + description="Allow external apt repository requests", + name="AllowAptRepositories", + protocols=[ + network.AzureFirewallApplicationRuleProtocolArgs( + port=int(Ports.HTTP), + protocol_type=network.AzureFirewallApplicationRuleProtocolType.HTTP, + ), + network.AzureFirewallApplicationRuleProtocolArgs( + port=int(Ports.HTTPS), + protocol_type=network.AzureFirewallApplicationRuleProtocolType.HTTPS, + ), + ], + source_addresses=props.subnet_apt_proxy_server_prefixes, + target_fqdns=PermittedDomains.APT_REPOSITORIES, + ), + ], + ), + network.AzureFirewallApplicationRuleCollectionArgs( + action=network.AzureFirewallRCActionArgs( + type=network.AzureFirewallRCActionType.ALLOW + ), + name="clamav-mirror", + priority=FirewallPriorities.SRE_CLAMAV_MIRROR, + rules=[ + network.AzureFirewallApplicationRuleArgs( + description="Allow external ClamAV definition update requests", + name="AllowClamAVDefinitionUpdates", + protocols=[ + network.AzureFirewallApplicationRuleProtocolArgs( + port=int(Ports.HTTP), + protocol_type=network.AzureFirewallApplicationRuleProtocolType.HTTP, + ), + network.AzureFirewallApplicationRuleProtocolArgs( + port=int(Ports.HTTPS), + protocol_type=network.AzureFirewallApplicationRuleProtocolType.HTTPS, + ), + ], + source_addresses=props.subnet_clamav_mirror_prefixes, + target_fqdns=PermittedDomains.CLAMAV_UPDATES, + ), + ], + ), + network.AzureFirewallApplicationRuleCollectionArgs( + action=network.AzureFirewallRCActionArgs( + type=network.AzureFirewallRCActionType.ALLOW + ), + name="identity-server", + priority=FirewallPriorities.SRE_IDENTITY_CONTAINERS, + rules=[ + network.AzureFirewallApplicationRuleArgs( + description="Allow Microsoft OAuth login requests", + name="AllowMicrosoftOAuthLogin", + protocols=[ + network.AzureFirewallApplicationRuleProtocolArgs( + port=int(Ports.HTTPS), + protocol_type=network.AzureFirewallApplicationRuleProtocolType.HTTPS, + ) + ], + source_addresses=props.subnet_identity_containers_prefixes, + target_fqdns=PermittedDomains.MICROSOFT_IDENTITY, + ), + ], + ), + network.AzureFirewallApplicationRuleCollectionArgs( + action=network.AzureFirewallRCActionArgs( + type=network.AzureFirewallRCActionType.ALLOW + ), + name="remote-desktop-gateway", + priority=FirewallPriorities.SRE_GUACAMOLE_CONTAINERS, + rules=[ + network.AzureFirewallApplicationRuleArgs( + description="Allow Microsoft OAuth login requests", + name="AllowMicrosoftOAuthLogin", + protocols=[ + network.AzureFirewallApplicationRuleProtocolArgs( + port=int(Ports.HTTPS), + protocol_type=network.AzureFirewallApplicationRuleProtocolType.HTTPS, + ) + ], + source_addresses=props.subnet_guacamole_containers_prefixes, + target_fqdns=PermittedDomains.MICROSOFT_LOGIN, + ), + ], + ), + 
network.AzureFirewallApplicationRuleCollectionArgs( + action=network.AzureFirewallRCActionArgs( + type=network.AzureFirewallRCActionType.ALLOW + ), + name="software-repositories", + priority=FirewallPriorities.SRE_USER_SERVICES_SOFTWARE_REPOSITORIES, + rules=[ + network.AzureFirewallApplicationRuleArgs( + description="Allow external CRAN package requests", + name="AllowCRANPackageDownload", + protocols=[ + network.AzureFirewallApplicationRuleProtocolArgs( + port=int(Ports.HTTPS), + protocol_type=network.AzureFirewallApplicationRuleProtocolType.HTTPS, + ) + ], + source_addresses=props.subnet_user_services_software_repositories_prefixes, + target_fqdns=PermittedDomains.SOFTWARE_REPOSITORIES_R, + ), + network.AzureFirewallApplicationRuleArgs( + description="Allow external PyPI package requests", + name="AllowPyPIPackageDownload", + protocols=[ + network.AzureFirewallApplicationRuleProtocolArgs( + port=int(Ports.HTTPS), + protocol_type=network.AzureFirewallApplicationRuleProtocolType.HTTPS, + ) + ], + source_addresses=props.subnet_user_services_software_repositories_prefixes, + target_fqdns=PermittedDomains.SOFTWARE_REPOSITORIES_PYTHON, + ), + ], + ), + network.AzureFirewallApplicationRuleCollectionArgs( + action=network.AzureFirewallRCActionArgs( + type=network.AzureFirewallRCActionType.ALLOW + ), + name="workspaces", + priority=FirewallPriorities.SRE_WORKSPACES, + rules=[ + network.AzureFirewallApplicationRuleArgs( + description="Allow external Ubuntu keyserver requests", + name="AllowUbuntuKeyserver", + protocols=[ + network.AzureFirewallApplicationRuleProtocolArgs( + port=int(Ports.HKP), + protocol_type=network.AzureFirewallApplicationRuleProtocolType.HTTP, + ), + ], + source_addresses=props.subnet_workspaces_prefixes, + target_fqdns=PermittedDomains.UBUNTU_KEYSERVER, + ), + network.AzureFirewallApplicationRuleArgs( + description="Allow external Ubuntu Snap Store access", + name="AllowUbuntuSnapcraft", + protocols=[ + network.AzureFirewallApplicationRuleProtocolArgs( + port=int(Ports.HTTPS), + protocol_type=network.AzureFirewallApplicationRuleProtocolType.HTTPS, + ), + ], + source_addresses=props.subnet_workspaces_prefixes, + target_fqdns=PermittedDomains.UBUNTU_SNAPCRAFT, + ), + network.AzureFirewallApplicationRuleArgs( + description="Allow external RStudio deb downloads", + name="AllowRStudioDeb", + protocols=[ + network.AzureFirewallApplicationRuleProtocolArgs( + port=int(Ports.HTTPS), + protocol_type=network.AzureFirewallApplicationRuleProtocolType.HTTPS, + ), + ], + source_addresses=props.subnet_workspaces_prefixes, + target_fqdns=PermittedDomains.RSTUDIO_DEB, + ), + ], + ), + network.AzureFirewallApplicationRuleCollectionArgs( + action=network.AzureFirewallRCActionArgs( + type=network.AzureFirewallRCActionType.DENY + ), + name="workspaces-deny", + priority=FirewallPriorities.SRE_WORKSPACES_DENY, + rules=[ + network.AzureFirewallApplicationRuleArgs( + description="Deny external Ubuntu Snap Store upload and login access", + name="DenyUbuntuSnapcraft", + protocols=[ + network.AzureFirewallApplicationRuleProtocolArgs( + port=int(Ports.HTTP), + protocol_type=network.AzureFirewallApplicationRuleProtocolType.HTTP, + ), + network.AzureFirewallApplicationRuleProtocolArgs( + port=int(Ports.HTTPS), + protocol_type=network.AzureFirewallApplicationRuleProtocolType.HTTPS, + ), + ], + source_addresses=props.subnet_workspaces_prefixes, + target_fqdns=ForbiddenDomains.UBUNTU_SNAPCRAFT, + ), + ], + ), + ], + azure_firewall_name=f"{stack_name}-firewall", + ip_configurations=[ + 
network.AzureFirewallIPConfigurationArgs( + name="FirewallIpConfiguration", + public_ip_address=network.SubResourceArgs(id=public_ip.id), + subnet=network.SubResourceArgs(id=props.subnet_firewall_id), + ) + ], + location=props.location, + management_ip_configuration=network.AzureFirewallIPConfigurationArgs( + name="FirewallManagementIpConfiguration", + public_ip_address=network.SubResourceArgs(id=public_ip_management.id), + subnet=network.SubResourceArgs(id=props.subnet_firewall_management_id), + ), + resource_group_name=props.resource_group_name, + sku=network.AzureFirewallSkuArgs( + name=network.AzureFirewallSkuName.AZF_W_V_NET, + tier=network.AzureFirewallSkuTier.BASIC, + ), + opts=child_opts, + tags=child_tags, + ) + + # Retrieve the private IP address for the firewall + private_ip_address = firewall.ip_configurations.apply( + lambda cfgs: "" if not cfgs else cfgs[0].private_ip_address + ) + + # Route all external traffic through the firewall. + # + # We use the system default route "0.0.0.0/0" as this will be overruled by + # anything more specific, such as VNet <-> VNet traffic which we do not want to + # send via the firewall. + # + # See https://learn.microsoft.com/en-us/azure/virtual-network/virtual-networks-udr-overview + network.Route( + f"{self._name}_route_via_firewall", + address_prefix="0.0.0.0/0", + next_hop_ip_address=private_ip_address, + next_hop_type=network.RouteNextHopType.VIRTUAL_APPLIANCE, + resource_group_name=props.resource_group_name, + route_name="ViaFirewall", + route_table_name=props.route_table_name, + opts=ResourceOptions.merge(child_opts, ResourceOptions(parent=firewall)), + ) diff --git a/data_safe_haven/infrastructure/programs/sre/gitea_server.py b/data_safe_haven/infrastructure/programs/sre/gitea_server.py new file mode 100644 index 0000000000..655f9b0016 --- /dev/null +++ b/data_safe_haven/infrastructure/programs/sre/gitea_server.py @@ -0,0 +1,350 @@ +from collections.abc import Mapping + +from pulumi import ComponentResource, Input, Output, ResourceOptions +from pulumi_azure_native import containerinstance, storage + +from data_safe_haven.infrastructure.common import ( + DockerHubCredentials, + get_ip_address_from_container_group, +) +from data_safe_haven.infrastructure.components import ( + FileShareFile, + FileShareFileProps, + LocalDnsRecordComponent, + LocalDnsRecordProps, + PostgresqlDatabaseComponent, + PostgresqlDatabaseProps, +) +from data_safe_haven.resources import resources_path +from data_safe_haven.utility import FileReader + + +class SREGiteaServerProps: + """Properties for SREGiteaServerComponent""" + + def __init__( + self, + containers_subnet_id: Input[str], + database_password: Input[str], + database_subnet_id: Input[str], + dns_server_ip: Input[str], + dockerhub_credentials: DockerHubCredentials, + ldap_server_hostname: Input[str], + ldap_server_port: Input[int], + ldap_username_attribute: Input[str], + ldap_user_filter: Input[str], + ldap_user_search_base: Input[str], + location: Input[str], + resource_group_name: Input[str], + sre_fqdn: Input[str], + storage_account_key: Input[str], + storage_account_name: Input[str], + database_username: Input[str] | None = None, + ) -> None: + self.containers_subnet_id = containers_subnet_id + self.database_password = database_password + self.database_subnet_id = database_subnet_id + self.database_username = ( + database_username if database_username else "postgresadmin" + ) + self.dns_server_ip = dns_server_ip + self.dockerhub_credentials = dockerhub_credentials + self.ldap_server_hostname = 
ldap_server_hostname + self.ldap_server_port = ldap_server_port + self.ldap_username_attribute = ldap_username_attribute + self.ldap_user_filter = ldap_user_filter + self.ldap_user_search_base = ldap_user_search_base + self.location = location + self.resource_group_name = resource_group_name + self.sre_fqdn = sre_fqdn + self.storage_account_key = storage_account_key + self.storage_account_name = storage_account_name + + +class SREGiteaServerComponent(ComponentResource): + """Deploy Gitea server with Pulumi""" + + def __init__( + self, + name: str, + stack_name: str, + props: SREGiteaServerProps, + opts: ResourceOptions | None = None, + tags: Input[Mapping[str, Input[str]]] | None = None, + ) -> None: + super().__init__("dsh:sre:GiteaServerComponent", name, {}, opts) + child_opts = ResourceOptions.merge(opts, ResourceOptions(parent=self)) + child_tags = {"component": "Gitea server"} | (tags if tags else {}) + + # Define configuration file shares + file_share_gitea_caddy = storage.FileShare( + f"{self._name}_file_share_gitea_caddy", + access_tier=storage.ShareAccessTier.COOL, + account_name=props.storage_account_name, + resource_group_name=props.resource_group_name, + share_name="gitea-caddy", + share_quota=1, + signed_identifiers=[], + opts=child_opts, + ) + file_share_gitea_gitea = storage.FileShare( + f"{self._name}_file_share_gitea_gitea", + access_tier=storage.ShareAccessTier.COOL, + account_name=props.storage_account_name, + resource_group_name=props.resource_group_name, + share_name="gitea-gitea", + share_quota=1, + signed_identifiers=[], + opts=child_opts, + ) + + # Upload caddy file + caddy_caddyfile_reader = FileReader( + resources_path / "gitea" / "caddy" / "Caddyfile" + ) + file_share_gitea_caddy_caddyfile = FileShareFile( + f"{self._name}_file_share_gitea_caddy_caddyfile", + FileShareFileProps( + destination_path=caddy_caddyfile_reader.name, + share_name=file_share_gitea_caddy.name, + file_contents=Output.secret(caddy_caddyfile_reader.file_contents()), + storage_account_key=props.storage_account_key, + storage_account_name=props.storage_account_name, + ), + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=file_share_gitea_caddy) + ), + ) + + # Upload Gitea configuration script + gitea_configure_sh_reader = FileReader( + resources_path / "gitea" / "gitea" / "configure.mustache.sh" + ) + gitea_configure_sh = Output.all( + admin_email="dshadmin@example.com", + admin_username="dshadmin", + ldap_username_attribute=props.ldap_username_attribute, + ldap_user_filter=props.ldap_user_filter, + ldap_server_hostname=props.ldap_server_hostname, + ldap_server_port=props.ldap_server_port, + ldap_user_search_base=props.ldap_user_search_base, + ).apply( + lambda mustache_values: gitea_configure_sh_reader.file_contents( + mustache_values + ) + ) + file_share_gitea_gitea_configure_sh = FileShareFile( + f"{self._name}_file_share_gitea_gitea_configure_sh", + FileShareFileProps( + destination_path=gitea_configure_sh_reader.name, + share_name=file_share_gitea_gitea.name, + file_contents=Output.secret(gitea_configure_sh), + storage_account_key=props.storage_account_key, + storage_account_name=props.storage_account_name, + ), + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=file_share_gitea_gitea) + ), + ) + # Upload Gitea entrypoint script + gitea_entrypoint_sh_reader = FileReader( + resources_path / "gitea" / "gitea" / "entrypoint.sh" + ) + file_share_gitea_gitea_entrypoint_sh = FileShareFile( + f"{self._name}_file_share_gitea_gitea_entrypoint_sh", + 
FileShareFileProps( + destination_path=gitea_entrypoint_sh_reader.name, + share_name=file_share_gitea_gitea.name, + file_contents=Output.secret(gitea_entrypoint_sh_reader.file_contents()), + storage_account_key=props.storage_account_key, + storage_account_name=props.storage_account_name, + ), + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=file_share_gitea_gitea) + ), + ) + + # Define a PostgreSQL server and default database + db_gitea_repository_name = "gitea" + db_server_gitea = PostgresqlDatabaseComponent( + f"{self._name}_db_gitea", + PostgresqlDatabaseProps( + database_names=[db_gitea_repository_name], + database_password=props.database_password, + database_resource_group_name=props.resource_group_name, + database_server_name=f"{stack_name}-db-server-gitea", + database_subnet_id=props.database_subnet_id, + database_username=props.database_username, + disable_secure_transport=False, + location=props.location, + ), + opts=child_opts, + tags=child_tags, + ) + + # Define the container group with guacd, guacamole and caddy + container_group = containerinstance.ContainerGroup( + f"{self._name}_container_group", + container_group_name=f"{stack_name}-container-group-gitea", + containers=[ + containerinstance.ContainerArgs( + image="caddy:2.8.4", + name="caddy"[:63], + ports=[ + containerinstance.ContainerPortArgs( + port=80, + protocol=containerinstance.ContainerGroupNetworkProtocol.TCP, + ), + ], + resources=containerinstance.ResourceRequirementsArgs( + requests=containerinstance.ResourceRequestsArgs( + cpu=0.5, + memory_in_gb=0.5, + ), + ), + volume_mounts=[ + containerinstance.VolumeMountArgs( + mount_path="/etc/caddy", + name="caddy-etc-caddy", + read_only=True, + ), + ], + ), + containerinstance.ContainerArgs( + image="gitea/gitea:1.22.1", + name="gitea"[:63], + command=["/app/custom/entrypoint.sh"], + environment_variables=[ + containerinstance.EnvironmentVariableArgs( + name="APP_NAME", value="Data Safe Haven Git server" + ), + containerinstance.EnvironmentVariableArgs( + name="RUN_MODE", value="dev" + ), + containerinstance.EnvironmentVariableArgs( + name="GITEA__database__DB_TYPE", value="postgres" + ), + containerinstance.EnvironmentVariableArgs( + name="GITEA__database__HOST", + value=db_server_gitea.private_ip_address, + ), + containerinstance.EnvironmentVariableArgs( + name="GITEA__database__NAME", value=db_gitea_repository_name + ), + containerinstance.EnvironmentVariableArgs( + name="GITEA__database__USER", + value=props.database_username, + ), + containerinstance.EnvironmentVariableArgs( + name="GITEA__database__PASSWD", + secure_value=props.database_password, + ), + containerinstance.EnvironmentVariableArgs( + name="GITEA__database__SSL_MODE", value="require" + ), + containerinstance.EnvironmentVariableArgs( + name="GITEA__log__LEVEL", + # Options are: "Trace", "Debug", "Info" [default], "Warn", "Error", "Critical" or "None". 
+ value="Debug", + ), + containerinstance.EnvironmentVariableArgs( + name="GITEA__security__INSTALL_LOCK", value="true" + ), + ], + ports=[ + containerinstance.ContainerPortArgs( + port=22, + protocol=containerinstance.ContainerGroupNetworkProtocol.TCP, + ), + ], + resources=containerinstance.ResourceRequirementsArgs( + requests=containerinstance.ResourceRequestsArgs( + cpu=2, + memory_in_gb=2, + ), + ), + volume_mounts=[ + containerinstance.VolumeMountArgs( + mount_path="/app/custom", + name="gitea-app-custom", + read_only=True, + ), + ], + ), + ], + dns_config=containerinstance.DnsConfigurationArgs( + name_servers=[props.dns_server_ip], + ), + # Required due to DockerHub rate-limit: https://docs.docker.com/docker-hub/download-rate-limit/ + image_registry_credentials=[ + { + "password": Output.secret(props.dockerhub_credentials.access_token), + "server": props.dockerhub_credentials.server, + "username": props.dockerhub_credentials.username, + } + ], + ip_address=containerinstance.IpAddressArgs( + ports=[ + containerinstance.PortArgs( + port=80, + protocol=containerinstance.ContainerGroupNetworkProtocol.TCP, + ) + ], + type=containerinstance.ContainerGroupIpAddressType.PRIVATE, + ), + location=props.location, + os_type=containerinstance.OperatingSystemTypes.LINUX, + resource_group_name=props.resource_group_name, + restart_policy=containerinstance.ContainerGroupRestartPolicy.ALWAYS, + sku=containerinstance.ContainerGroupSku.STANDARD, + subnet_ids=[ + containerinstance.ContainerGroupSubnetIdArgs( + id=props.containers_subnet_id + ) + ], + volumes=[ + containerinstance.VolumeArgs( + azure_file=containerinstance.AzureFileVolumeArgs( + share_name=file_share_gitea_caddy.name, + storage_account_key=props.storage_account_key, + storage_account_name=props.storage_account_name, + ), + name="caddy-etc-caddy", + ), + containerinstance.VolumeArgs( + azure_file=containerinstance.AzureFileVolumeArgs( + share_name=file_share_gitea_gitea.name, + storage_account_key=props.storage_account_key, + storage_account_name=props.storage_account_name, + ), + name="gitea-app-custom", + ), + ], + opts=ResourceOptions.merge( + child_opts, + ResourceOptions( + delete_before_replace=True, + depends_on=[ + file_share_gitea_caddy_caddyfile, + file_share_gitea_gitea_configure_sh, + file_share_gitea_gitea_entrypoint_sh, + ], + replace_on_changes=["containers"], + ), + ), + tags=child_tags, + ) + + # Register the container group in the SRE DNS zone + LocalDnsRecordComponent( + f"{self._name}_gitea_dns_record_set", + LocalDnsRecordProps( + base_fqdn=props.sre_fqdn, + private_ip_address=get_ip_address_from_container_group(container_group), + record_name="gitea", + resource_group_name=props.resource_group_name, + ), + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=container_group) + ), + ) diff --git a/data_safe_haven/infrastructure/programs/sre/hedgedoc_server.py b/data_safe_haven/infrastructure/programs/sre/hedgedoc_server.py new file mode 100644 index 0000000000..5187401de5 --- /dev/null +++ b/data_safe_haven/infrastructure/programs/sre/hedgedoc_server.py @@ -0,0 +1,329 @@ +from collections.abc import Mapping + +from pulumi import ComponentResource, Input, Output, ResourceOptions +from pulumi_azure_native import containerinstance, storage + +from data_safe_haven.functions import b64encode +from data_safe_haven.infrastructure.common import ( + DockerHubCredentials, + get_ip_address_from_container_group, +) +from data_safe_haven.infrastructure.components import ( + FileShareFile, + FileShareFileProps, + 
LocalDnsRecordComponent, + LocalDnsRecordProps, + PostgresqlDatabaseComponent, + PostgresqlDatabaseProps, +) +from data_safe_haven.resources import resources_path +from data_safe_haven.types import Ports +from data_safe_haven.utility import FileReader + + +class SREHedgeDocServerProps: + """Properties for SREHedgeDocServerComponent""" + + def __init__( + self, + containers_subnet_id: Input[str], + database_password: Input[str], + database_subnet_id: Input[str], + dns_server_ip: Input[str], + dockerhub_credentials: DockerHubCredentials, + ldap_server_hostname: Input[str], + ldap_server_port: Input[int], + ldap_user_filter: Input[str], + ldap_user_search_base: Input[str], + ldap_username_attribute: Input[str], + location: Input[str], + resource_group_name: Input[str], + sre_fqdn: Input[str], + storage_account_key: Input[str], + storage_account_name: Input[str], + database_username: Input[str] | None = None, + ) -> None: + self.containers_subnet_id = containers_subnet_id + self.database_subnet_id = database_subnet_id + self.database_password = database_password + self.database_username = ( + database_username if database_username else "postgresadmin" + ) + self.resource_group_name = resource_group_name + self.dns_server_ip = dns_server_ip + self.dockerhub_credentials = dockerhub_credentials + self.ldap_server_hostname = ldap_server_hostname + self.ldap_server_port = Output.from_input(ldap_server_port).apply(str) + self.ldap_user_filter = ldap_user_filter + self.ldap_user_search_base = ldap_user_search_base + self.ldap_username_attribute = ldap_username_attribute + self.location = location + self.resource_group_name = resource_group_name + self.sre_fqdn = sre_fqdn + self.storage_account_key = storage_account_key + self.storage_account_name = storage_account_name + + +class SREHedgeDocServerComponent(ComponentResource): + """Deploy HedgeDoc server with Pulumi""" + + def __init__( + self, + name: str, + stack_name: str, + props: SREHedgeDocServerProps, + opts: ResourceOptions | None = None, + tags: Input[Mapping[str, Input[str]]] | None = None, + ) -> None: + super().__init__("dsh:sre:HedgeDocServerComponent", name, {}, opts) + child_opts = ResourceOptions.merge(opts, ResourceOptions(parent=self)) + child_tags = {"component": "HedgeDoc server"} | (tags if tags else {}) + + # Define configuration file shares + file_share_hedgedoc_caddy = storage.FileShare( + f"{self._name}_file_share_hedgedoc_caddy", + access_tier=storage.ShareAccessTier.COOL, + account_name=props.storage_account_name, + resource_group_name=props.resource_group_name, + share_name="hedgedoc-caddy", + share_quota=1, + signed_identifiers=[], + opts=child_opts, + ) + + # Upload caddy file + caddy_caddyfile_reader = FileReader( + resources_path / "hedgedoc" / "caddy" / "Caddyfile" + ) + file_share_hedgedoc_caddy_caddyfile = FileShareFile( + f"{self._name}_file_share_hedgedoc_caddy_caddyfile", + FileShareFileProps( + destination_path=caddy_caddyfile_reader.name, + share_name=file_share_hedgedoc_caddy.name, + file_contents=Output.secret(caddy_caddyfile_reader.file_contents()), + storage_account_key=props.storage_account_key, + storage_account_name=props.storage_account_name, + ), + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=file_share_hedgedoc_caddy) + ), + ) + + # Load HedgeDoc configuration file for later use + hedgedoc_config_json_reader = FileReader( + resources_path / "hedgedoc" / "hedgedoc" / "config.json" + ) + + # Define a PostgreSQL server and default database + db_hedgedoc_documents_name = "hedgedoc" + 
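+        # This database name is used twice: it is passed to the PostgreSQL
+        # component below in database_names, and it is reused as the
+        # CMD_DB_DATABASE environment variable of the HedgeDoc container, keeping
+        # the provisioned database and the application configuration in sync.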
db_server_hedgedoc = PostgresqlDatabaseComponent( + f"{self._name}_db_hedgedoc", + PostgresqlDatabaseProps( + database_names=[db_hedgedoc_documents_name], + database_password=props.database_password, + database_resource_group_name=props.resource_group_name, + database_server_name=f"{stack_name}-db-server-hedgedoc", + database_subnet_id=props.database_subnet_id, + database_username=props.database_username, + disable_secure_transport=False, + location=props.location, + ), + opts=child_opts, + tags=child_tags, + ) + + # Define the container group with caddy and HedgeDoc + container_group = containerinstance.ContainerGroup( + f"{self._name}_container_group", + container_group_name=f"{stack_name}-container-group-hedgedoc", + containers=[ + containerinstance.ContainerArgs( + image="caddy:2.8.4", + name="caddy"[:63], + ports=[ + containerinstance.ContainerPortArgs( + port=80, + protocol=containerinstance.ContainerGroupNetworkProtocol.TCP, + ), + ], + resources=containerinstance.ResourceRequirementsArgs( + requests=containerinstance.ResourceRequestsArgs( + cpu=0.5, + memory_in_gb=0.5, + ), + ), + volume_mounts=[ + containerinstance.VolumeMountArgs( + mount_path="/etc/caddy", + name="caddy-etc-caddy", + read_only=True, + ), + ], + ), + containerinstance.ContainerArgs( + image="quay.io/hedgedoc/hedgedoc:1.9.9", + name="hedgedoc"[:63], + environment_variables=[ + containerinstance.EnvironmentVariableArgs( + name="CMD_ALLOW_ANONYMOUS", + value="false", + ), + containerinstance.EnvironmentVariableArgs( + name="CMD_DB_DATABASE", + value=db_hedgedoc_documents_name, + ), + containerinstance.EnvironmentVariableArgs( + name="CMD_DB_DIALECT", + value="postgres", + ), + containerinstance.EnvironmentVariableArgs( + name="CMD_DB_HOST", + value=db_server_hedgedoc.private_ip_address, + ), + containerinstance.EnvironmentVariableArgs( + name="CMD_DB_PASSWORD", + secure_value=props.database_password, + ), + containerinstance.EnvironmentVariableArgs( + name="CMD_DB_PORT", + value=Ports.POSTGRESQL, + ), + containerinstance.EnvironmentVariableArgs( + name="CMD_DB_USERNAME", + value=props.database_username, + ), + containerinstance.EnvironmentVariableArgs( + name="CMD_DOMAIN", + value=Output.concat("hedgedoc.", props.sre_fqdn), + ), + containerinstance.EnvironmentVariableArgs( + name="CMD_EMAIL", + value="false", + ), + containerinstance.EnvironmentVariableArgs( + name="CMD_LDAP_PROVIDERNAME", + value="Data Safe Haven", + ), + containerinstance.EnvironmentVariableArgs( + name="CMD_LDAP_SEARCHBASE", + value=props.ldap_user_search_base, + ), + containerinstance.EnvironmentVariableArgs( + name="CMD_LDAP_SEARCHFILTER", + value=Output.concat( + "(&", + props.ldap_user_filter, + "(", + props.ldap_username_attribute, + "={{username}})", + ")", + ), + ), + containerinstance.EnvironmentVariableArgs( + name="CMD_LDAP_URL", + value=Output.concat( + "ldap://", + props.ldap_server_hostname, + ":", + props.ldap_server_port, + ), + ), + containerinstance.EnvironmentVariableArgs( + name="CMD_LDAP_USERIDFIELD", + value=props.ldap_username_attribute, + ), + containerinstance.EnvironmentVariableArgs( + name="CMD_LOGLEVEL", + value="info", + ), + ], + ports=[], + resources=containerinstance.ResourceRequirementsArgs( + requests=containerinstance.ResourceRequestsArgs( + cpu=2, + memory_in_gb=2, + ), + ), + volume_mounts=[ + containerinstance.VolumeMountArgs( + mount_path="/files", + name="hedgedoc-files-config-json", + read_only=True, + ), + ], + ), + ], + dns_config=containerinstance.DnsConfigurationArgs( + 
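    # Annotation, not part of the diff: worked example for the CMD_LDAP_SEARCHFILTER
    # value assembled with Output.concat above. With made-up inputs
    #     ldap_user_filter        = "(objectClass=person)"
    #     ldap_username_attribute = "uid"
    # the concatenation resolves to
    #     (&(objectClass=person)(uid={{username}}))
    # HedgeDoc substitutes {{username}} with the login name at authentication time,
    # so the filter combines the base user filter with a match on the username
    # attribute.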
name_servers=[props.dns_server_ip], + ), + # Required due to DockerHub rate-limit: https://docs.docker.com/docker-hub/download-rate-limit/ + image_registry_credentials=[ + { + "password": Output.secret(props.dockerhub_credentials.access_token), + "server": props.dockerhub_credentials.server, + "username": props.dockerhub_credentials.username, + } + ], + ip_address=containerinstance.IpAddressArgs( + ports=[ + containerinstance.PortArgs( + port=80, + protocol=containerinstance.ContainerGroupNetworkProtocol.TCP, + ) + ], + type=containerinstance.ContainerGroupIpAddressType.PRIVATE, + ), + location=props.location, + os_type=containerinstance.OperatingSystemTypes.LINUX, + resource_group_name=props.resource_group_name, + restart_policy=containerinstance.ContainerGroupRestartPolicy.ALWAYS, + sku=containerinstance.ContainerGroupSku.STANDARD, + subnet_ids=[ + containerinstance.ContainerGroupSubnetIdArgs( + id=props.containers_subnet_id + ) + ], + volumes=[ + containerinstance.VolumeArgs( + azure_file=containerinstance.AzureFileVolumeArgs( + share_name=file_share_hedgedoc_caddy.name, + storage_account_key=props.storage_account_key, + storage_account_name=props.storage_account_name, + ), + name="caddy-etc-caddy", + ), + containerinstance.VolumeArgs( + name="hedgedoc-files-config-json", + secret={ + "config.json": b64encode( + hedgedoc_config_json_reader.file_contents() + ) + }, + ), + ], + opts=ResourceOptions.merge( + child_opts, + ResourceOptions( + delete_before_replace=True, + depends_on=[ + file_share_hedgedoc_caddy_caddyfile, + ], + replace_on_changes=["containers"], + ), + ), + tags=child_tags, + ) + + # Register the container group in the SRE DNS zone + LocalDnsRecordComponent( + f"{self._name}_hedgedoc_dns_record_set", + LocalDnsRecordProps( + base_fqdn=props.sre_fqdn, + private_ip_address=get_ip_address_from_container_group(container_group), + record_name="hedgedoc", + resource_group_name=props.resource_group_name, + ), + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=container_group) + ), + ) diff --git a/data_safe_haven/infrastructure/programs/sre/identity.py b/data_safe_haven/infrastructure/programs/sre/identity.py new file mode 100644 index 0000000000..0f885b94c5 --- /dev/null +++ b/data_safe_haven/infrastructure/programs/sre/identity.py @@ -0,0 +1,251 @@ +"""Pulumi component for SRE identity""" + +from collections.abc import Mapping + +from pulumi import ComponentResource, Input, Output, ResourceOptions +from pulumi_azure_native import containerinstance, network, storage + +from data_safe_haven.infrastructure.common import ( + DockerHubCredentials, + get_id_from_subnet, + get_ip_address_from_container_group, +) +from data_safe_haven.infrastructure.components import ( + EntraApplication, + EntraApplicationProps, + LocalDnsRecordComponent, + LocalDnsRecordProps, +) + + +class SREIdentityProps: + """Properties for SREIdentityComponent""" + + def __init__( + self, + dns_server_ip: Input[str], + dockerhub_credentials: DockerHubCredentials, + entra_application_name: Input[str], + entra_auth_token: str, + entra_tenant_id: Input[str], + location: Input[str], + resource_group_name: Input[str], + shm_fqdn: Input[str], + sre_fqdn: Input[str], + storage_account_key: Input[str], + storage_account_name: Input[str], + subnet_containers: Input[network.GetSubnetResult], + ) -> None: + self.dns_server_ip = dns_server_ip + self.dockerhub_credentials = dockerhub_credentials + self.entra_application_name = entra_application_name + self.entra_auth_token = entra_auth_token + 
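Annotation (not part of the diff): the hedgedoc-files-config-json volume above is an Azure Container Instances secret volume, and ACI requires each secret value to be base64-encoded, which is why the raw config.json text goes through b64encode. That helper comes from data_safe_haven.functions and its implementation is not part of this diff; a plausible minimal equivalent would be:

    import base64

    def b64encode(input_string: str) -> str:
        # Encode a UTF-8 string as base64 text, the form ACI secret volumes expect.
        return base64.b64encode(input_string.encode("utf-8")).decode()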
self.entra_tenant_id = entra_tenant_id + self.location = location + self.resource_group_name = resource_group_name + self.shm_fqdn = shm_fqdn + self.sre_fqdn = sre_fqdn + self.storage_account_key = storage_account_key + self.storage_account_name = storage_account_name + self.subnet_containers_id = Output.from_input(subnet_containers).apply( + get_id_from_subnet + ) + + +class SREIdentityComponent(ComponentResource): + """Deploy SRE backup with Pulumi""" + + def __init__( + self, + name: str, + stack_name: str, + props: SREIdentityProps, + opts: ResourceOptions | None = None, + tags: Input[Mapping[str, Input[str]]] | None = None, + ) -> None: + super().__init__("dsh:sre:IdentityComponent", name, {}, opts) + child_opts = ResourceOptions.merge(opts, ResourceOptions(parent=self)) + child_tags = {"component": "identity"} | (tags if tags else {}) + + # The port that the server will be hosted on + self.server_port = 1389 + + # Define configuration file share + file_share = storage.FileShare( + f"{self._name}_file_share", + access_tier=storage.ShareAccessTier.COOL, + account_name=props.storage_account_name, + resource_group_name=props.resource_group_name, + share_name="identity-redis", + share_quota=5, + signed_identifiers=[], + opts=child_opts, + ) + + # Define Entra ID application + entra_application = EntraApplication( + f"{self._name}_entra_application", + EntraApplicationProps( + application_name=props.entra_application_name, + application_role_assignments=["User.Read.All", "GroupMember.Read.All"], + application_secret_name="Apricot Authentication Secret", + delegated_role_assignments=["User.Read.All"], + public_client_redirect_uri="urn:ietf:wg:oauth:2.0:oob", + ), + auth_token=props.entra_auth_token, + opts=child_opts, + ) + + # Define the LDAP server container group with Apricot + container_group = containerinstance.ContainerGroup( + f"{self._name}_container_group", + container_group_name=f"{stack_name}-container-group-identity", + containers=[ + containerinstance.ContainerArgs( + image="ghcr.io/alan-turing-institute/apricot:0.0.7", + name="apricot", + environment_variables=[ + containerinstance.EnvironmentVariableArgs( + name="BACKEND", + value="MicrosoftEntra", + ), + containerinstance.EnvironmentVariableArgs( + name="CLIENT_ID", + value=entra_application.application_id, + ), + containerinstance.EnvironmentVariableArgs( + name="CLIENT_SECRET", + secure_value=entra_application.application_secret, + ), + containerinstance.EnvironmentVariableArgs( + name="DEBUG", + value="true", + ), + containerinstance.EnvironmentVariableArgs( + name="DOMAIN", + value=props.shm_fqdn, + ), + containerinstance.EnvironmentVariableArgs( + name="ENTRA_TENANT_ID", + value=props.entra_tenant_id, + ), + containerinstance.EnvironmentVariableArgs( + name="REDIS_HOST", + value="localhost", + ), + ], + # All Azure Container Instances need to expose port 80 on at least + # one container even if there's nothing behind it. 
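Annotation (not part of the diff): Apricot presents a read-only LDAP view of Entra ID on server_port 1389, and the networking rules later in this diff allow Guacamole, the user services and the workspaces to reach it. A rough sketch of what a client query against that service could look like, where the hostname and base DN are illustrative assumptions and bind requirements depend on how Apricot is configured:

    from ldap3 import Connection, Server

    # Hostname and search base are made up for illustration; they are not taken
    # from this diff.
    server = Server("identity.sre.example.org", port=1389)
    with Connection(server, auto_bind=True) as conn:  # anonymous bind, if permitted
        conn.search(
            search_base="OU=users,DC=example,DC=org",
            search_filter="(objectClass=person)",
            attributes=["uid"],
        )
        print(conn.entries)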
+ ports=[ + containerinstance.ContainerPortArgs( + port=80, + protocol=containerinstance.ContainerGroupNetworkProtocol.TCP, + ), + containerinstance.ContainerPortArgs( + port=self.server_port, + protocol=containerinstance.ContainerGroupNetworkProtocol.TCP, + ), + ], + resources=containerinstance.ResourceRequirementsArgs( + requests=containerinstance.ResourceRequestsArgs( + cpu=1, + memory_in_gb=1, + ), + ), + volume_mounts=[], + ), + containerinstance.ContainerArgs( + image="redis:7.4.0", + name="redis", + environment_variables=[], + ports=[ + containerinstance.ContainerPortArgs( + port=6379, + protocol=containerinstance.ContainerGroupNetworkProtocol.TCP, + ), + ], + resources=containerinstance.ResourceRequirementsArgs( + requests=containerinstance.ResourceRequestsArgs( + cpu=1, + memory_in_gb=1, + ), + ), + volume_mounts=[ + containerinstance.VolumeMountArgs( + mount_path="/data", + name="identity-redis-data", + read_only=False, + ), + ], + ), + ], + dns_config=containerinstance.DnsConfigurationArgs( + name_servers=[props.dns_server_ip], + ), + # Required due to DockerHub rate-limit: https://docs.docker.com/docker-hub/download-rate-limit/ + image_registry_credentials=[ + { + "password": Output.secret(props.dockerhub_credentials.access_token), + "server": props.dockerhub_credentials.server, + "username": props.dockerhub_credentials.username, + } + ], + ip_address=containerinstance.IpAddressArgs( + ports=[ + containerinstance.PortArgs( + port=80, + protocol=containerinstance.ContainerGroupNetworkProtocol.TCP, + ), + containerinstance.PortArgs( + port=self.server_port, + protocol=containerinstance.ContainerGroupNetworkProtocol.TCP, + ), + ], + type=containerinstance.ContainerGroupIpAddressType.PRIVATE, + ), + location=props.location, + os_type=containerinstance.OperatingSystemTypes.LINUX, + resource_group_name=props.resource_group_name, + restart_policy=containerinstance.ContainerGroupRestartPolicy.ALWAYS, + sku=containerinstance.ContainerGroupSku.STANDARD, + subnet_ids=[ + containerinstance.ContainerGroupSubnetIdArgs( + id=props.subnet_containers_id + ) + ], + volumes=[ + containerinstance.VolumeArgs( + azure_file=containerinstance.AzureFileVolumeArgs( + share_name=file_share.name, + storage_account_key=props.storage_account_key, + storage_account_name=props.storage_account_name, + ), + name="identity-redis-data", + ), + ], + opts=ResourceOptions.merge( + child_opts, + ResourceOptions( + delete_before_replace=True, + replace_on_changes=["containers"], + ), + ), + tags=child_tags, + ) + + # Register the container group in the SRE DNS zone + local_dns = LocalDnsRecordComponent( + f"{self._name}_dns_record_set", + LocalDnsRecordProps( + base_fqdn=props.sre_fqdn, + private_ip_address=get_ip_address_from_container_group(container_group), + record_name="identity", + resource_group_name=props.resource_group_name, + ), + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=container_group) + ), + ) + + # Register outputs + self.hostname = local_dns.hostname diff --git a/data_safe_haven/infrastructure/programs/sre/monitoring.py b/data_safe_haven/infrastructure/programs/sre/monitoring.py new file mode 100644 index 0000000000..f672c7eb63 --- /dev/null +++ b/data_safe_haven/infrastructure/programs/sre/monitoring.py @@ -0,0 +1,300 @@ +"""Pulumi component for SRE monitoring""" + +from collections.abc import Mapping + +from pulumi import ComponentResource, Input, Output, ResourceOptions +from pulumi_azure_native import insights, maintenance, network, operationalinsights + +from 
data_safe_haven.functions import next_occurrence, replace_separators +from data_safe_haven.infrastructure.common import get_id_from_subnet +from data_safe_haven.infrastructure.components import WrappedLogAnalyticsWorkspace +from data_safe_haven.types import AzureDnsZoneNames + + +class SREMonitoringProps: + """Properties for SREMonitoringComponent""" + + def __init__( + self, + dns_private_zones: Input[dict[str, network.PrivateZone]], + location: Input[str], + resource_group_name: Input[str], + subnet: Input[network.GetSubnetResult], + timezone: Input[str], + ) -> None: + self.dns_private_zones = dns_private_zones + self.location = location + self.resource_group_name = resource_group_name + self.subnet_id = Output.from_input(subnet).apply(get_id_from_subnet) + self.timezone = timezone + + +class SREMonitoringComponent(ComponentResource): + """Deploy SRE monitoring with Pulumi""" + + def __init__( + self, + name: str, + stack_name: str, + props: SREMonitoringProps, + opts: ResourceOptions | None = None, + tags: Input[Mapping[str, Input[str]]] | None = None, + ) -> None: + super().__init__("dsh:sre:MonitoringComponent", name, {}, opts) + child_opts = ResourceOptions.merge(opts, ResourceOptions(parent=self)) + child_tags = {"component": "monitoring"} | (tags if tags else {}) + + # Deploy maintenance configuration + # See https://learn.microsoft.com/en-us/azure/update-manager/scheduled-patching + self.maintenance_configuration = maintenance.MaintenanceConfiguration( + f"{self._name}_maintenance_configuration", + duration="03:55", # Maximum allowed value for this parameter + extension_properties={"InGuestPatchMode": "User"}, + install_patches=maintenance.InputPatchConfigurationArgs( + linux_parameters=maintenance.InputLinuxParametersArgs( + classifications_to_include=["Critical", "Security"], + ), + reboot_setting="IfRequired", + ), + location=props.location, + maintenance_scope=maintenance.MaintenanceScope.IN_GUEST_PATCH, + recur_every="1Day", + resource_group_name=props.resource_group_name, + resource_name_=f"{stack_name}-maintenance-configuration", + start_date_time=Output.from_input(props.timezone).apply( + lambda timezone: next_occurrence( + hour=2, + minute=4, + timezone=timezone, + time_format="iso_minute", + ) # Run maintenance at 02:04 local time every night + ), + time_zone="UTC", # Our start time is given in UTC + visibility=maintenance.Visibility.CUSTOM, + opts=ResourceOptions.merge( + child_opts, + ResourceOptions( + # Ignore start_date_time or this will be changed on each redeploy + ignore_changes=["start_date_time"] + ), + ), + tags=child_tags, + ) + + # Deploy log analytics workspace and get workspace keys + self.log_analytics = WrappedLogAnalyticsWorkspace( + f"{self._name}_log_analytics", + location=props.location, + resource_group_name=props.resource_group_name, + retention_in_days=30, + sku=operationalinsights.WorkspaceSkuArgs( + name=operationalinsights.WorkspaceSkuNameEnum.PER_GB2018, + ), + workspace_name=f"{stack_name}-log", + opts=child_opts, + tags=child_tags, + ) + + # Create a private linkscope + log_analytics_private_link_scope = insights.PrivateLinkScope( + f"{self._name}_log_analytics_private_link_scope", + access_mode_settings=insights.AccessModeSettingsArgs( + ingestion_access_mode=insights.AccessMode.PRIVATE_ONLY, + query_access_mode=insights.AccessMode.PRIVATE_ONLY, + ), + location="Global", + resource_group_name=props.resource_group_name, + scope_name=f"{stack_name}-ampls", + opts=ResourceOptions.merge( + child_opts, + ResourceOptions( + 
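    # Annotation, not part of the diff: next_occurrence (imported from
    # data_safe_haven.functions above) is not shown in this change. Judging from
    # the maintenance configuration call, it returns the next time the clock reads
    # hour:minute in the given IANA timezone, expressed in UTC at minute precision.
    # A simplified sketch of that presumed behaviour, ignoring the time_format
    # argument, for orientation only:
    from datetime import datetime, timedelta, timezone as dt_timezone
    from zoneinfo import ZoneInfo

    def next_occurrence_sketch(hour: int, minute: int, timezone: str) -> str:
        now = datetime.now(ZoneInfo(timezone))
        target = now.replace(hour=hour, minute=minute, second=0, microsecond=0)
        if target < now:
            target += timedelta(days=1)
        # e.g. 02:04 Europe/London during BST comes back as "... 01:04" in UTC.
        return target.astimezone(dt_timezone.utc).strftime("%Y-%m-%d %H:%M")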
parent=self.log_analytics, + ), + ), + tags=child_tags, + ) + # Link the private linkscope to the log analytics workspace + insights.PrivateLinkScopedResource( + f"{self._name}_log_analytics_ampls_connection", + linked_resource_id=self.log_analytics.id, + name=f"{stack_name}-cnxn-ampls-to-log-analytics", + resource_group_name=props.resource_group_name, + scope_name=log_analytics_private_link_scope.name, + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=log_analytics_private_link_scope) + ), + ) + + # Create a private endpoint for the log analytics workspace + log_analytics_private_endpoint = network.PrivateEndpoint( + f"{self._name}_log_analytics_private_endpoint", + custom_network_interface_name=f"{stack_name}-pep-log-analytics-nic", + location=props.location, + private_endpoint_name=f"{stack_name}-pep-log-analytics", + private_link_service_connections=[ + network.PrivateLinkServiceConnectionArgs( + group_ids=["azuremonitor"], + name=f"{stack_name}-cnxn-ampls-to-pep-log-analytics", + private_link_service_id=log_analytics_private_link_scope.id, + ) + ], + resource_group_name=props.resource_group_name, + subnet=network.SubnetArgs(id=props.subnet_id), + opts=ResourceOptions.merge( + child_opts, + ResourceOptions( + depends_on=[log_analytics_private_link_scope, self.log_analytics], + ignore_changes=["custom_dns_configs"], + parent=self.log_analytics, + ), + ), + tags=child_tags, + ) + + # Add a private DNS record for each log analytics workspace custom DNS config + network.PrivateDnsZoneGroup( + f"{self._name}_log_analytics_private_dns_zone_group", + private_dns_zone_configs=[ + network.PrivateDnsZoneConfigArgs( + name=replace_separators( + f"{stack_name}-log-to-{dns_zone_name}", "-" + ), + private_dns_zone_id=props.dns_private_zones[dns_zone_name].id, + ) + for dns_zone_name in AzureDnsZoneNames.AZURE_MONITOR + ], + private_dns_zone_group_name=f"{stack_name}-dzg-log", + private_endpoint_name=log_analytics_private_endpoint.name, + resource_group_name=props.resource_group_name, + opts=ResourceOptions.merge( + child_opts, + ResourceOptions( + depends_on=log_analytics_private_endpoint, + parent=log_analytics_private_endpoint, + ), + ), + ) + + # Create a data collection endpoint + self.data_collection_endpoint = insights.DataCollectionEndpoint( + f"{self._name}_data_collection_endpoint", + data_collection_endpoint_name=f"{stack_name}-dce", + location=props.location, + network_acls=insights.DataCollectionEndpointNetworkAclsArgs( + public_network_access=insights.KnownPublicNetworkAccessOptions.DISABLED, + ), + resource_group_name=props.resource_group_name, + opts=ResourceOptions.merge( + child_opts, + ResourceOptions(parent=self.log_analytics), + ), + tags=child_tags, + ) + # Link the private linkscope to the data collection endpoint + insights.PrivateLinkScopedResource( + f"{self._name}_data_collection_endpoint_ampls_connection", + linked_resource_id=self.data_collection_endpoint.id, + name=f"{stack_name}-cnxn-ampls-to-dce", + resource_group_name=props.resource_group_name, + scope_name=log_analytics_private_link_scope.name, + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=log_analytics_private_link_scope) + ), + ) + + # Create a data collection rule for VM logs + self.data_collection_rule_vms = insights.DataCollectionRule( + f"{self._name}_data_collection_rule_vms", + data_collection_rule_name=f"{stack_name}-dcr-vms", + data_collection_endpoint_id=self.data_collection_endpoint.id, # used by Logs Ingestion API + 
destinations=insights.DataCollectionRuleDestinationsArgs( + log_analytics=[ + insights.LogAnalyticsDestinationArgs( + name=self.log_analytics.name, + workspace_resource_id=self.log_analytics.id, + ) + ], + ), + data_flows=[ + insights.DataFlowArgs( + destinations=[self.log_analytics.name], + streams=[ + insights.KnownDataFlowStreams.MICROSOFT_PERF, + ], + transform_kql="source", + output_stream=insights.KnownDataFlowStreams.MICROSOFT_PERF, + ), + insights.DataFlowArgs( + destinations=[self.log_analytics.name], + streams=[ + insights.KnownDataFlowStreams.MICROSOFT_SYSLOG, + ], + transform_kql="source", + output_stream=insights.KnownDataFlowStreams.MICROSOFT_SYSLOG, + ), + ], + data_sources=insights.DataCollectionRuleDataSourcesArgs( + performance_counters=[ + insights.PerfCounterDataSourceArgs( + counter_specifiers=[ + "Processor(*)\\% Processor Time", + "Memory(*)\\% Used Memory", + "Logical Disk(*)\\% Used Space", + "System(*)\\Unique Users", + ], + name="LinuxPerfCounters", + sampling_frequency_in_seconds=60, + streams=[ + insights.KnownPerfCounterDataSourceStreams.MICROSOFT_PERF, + ], + ), + ], + syslog=[ + insights.SyslogDataSourceArgs( + facility_names=[ + # Note that ASTERISK is not currently working + insights.KnownSyslogDataSourceFacilityNames.ALERT, + insights.KnownSyslogDataSourceFacilityNames.AUDIT, + insights.KnownSyslogDataSourceFacilityNames.AUTH, + insights.KnownSyslogDataSourceFacilityNames.AUTHPRIV, + insights.KnownSyslogDataSourceFacilityNames.CLOCK, + insights.KnownSyslogDataSourceFacilityNames.CRON, + insights.KnownSyslogDataSourceFacilityNames.DAEMON, + insights.KnownSyslogDataSourceFacilityNames.FTP, + insights.KnownSyslogDataSourceFacilityNames.KERN, + insights.KnownSyslogDataSourceFacilityNames.LPR, + insights.KnownSyslogDataSourceFacilityNames.MAIL, + insights.KnownSyslogDataSourceFacilityNames.MARK, + insights.KnownSyslogDataSourceFacilityNames.NEWS, + insights.KnownSyslogDataSourceFacilityNames.NOPRI, + insights.KnownSyslogDataSourceFacilityNames.NTP, + insights.KnownSyslogDataSourceFacilityNames.SYSLOG, + insights.KnownSyslogDataSourceFacilityNames.USER, + insights.KnownSyslogDataSourceFacilityNames.UUCP, + ], + log_levels=[ + # Note that ASTERISK is not currently working + insights.KnownSyslogDataSourceLogLevels.DEBUG, + insights.KnownSyslogDataSourceLogLevels.INFO, + insights.KnownSyslogDataSourceLogLevels.NOTICE, + insights.KnownSyslogDataSourceLogLevels.WARNING, + insights.KnownSyslogDataSourceLogLevels.ERROR, + insights.KnownSyslogDataSourceLogLevels.CRITICAL, + insights.KnownSyslogDataSourceLogLevels.ALERT, + insights.KnownSyslogDataSourceLogLevels.EMERGENCY, + ], + name="LinuxSyslog", + streams=[ + insights.KnownSyslogDataSourceStreams.MICROSOFT_SYSLOG + ], + ), + ], + ), + location=props.location, + resource_group_name=props.resource_group_name, + opts=ResourceOptions.merge( + child_opts, + ResourceOptions(parent=self.log_analytics), + ), + tags=child_tags, + ) diff --git a/data_safe_haven/infrastructure/programs/sre/networking.py b/data_safe_haven/infrastructure/programs/sre/networking.py new file mode 100644 index 0000000000..5578d791a0 --- /dev/null +++ b/data_safe_haven/infrastructure/programs/sre/networking.py @@ -0,0 +1,2039 @@ +"""Pulumi component for SRE networking""" + +from collections.abc import Mapping + +from pulumi import ComponentResource, Input, Output, ResourceOptions +from pulumi_azure_native import network + +from data_safe_haven.functions import alphanumeric, replace_separators +from data_safe_haven.infrastructure.common import ( + 
SREDnsIpRanges, + SREIpRanges, + get_id_from_vnet, + get_name_from_vnet, +) +from data_safe_haven.types import NetworkingPriorities, Ports + + +class SRENetworkingProps: + """Properties for SRENetworkingComponent""" + + def __init__( + self, + dns_private_zones: Input[dict[str, network.PrivateZone]], + dns_server_ip: Input[str], + dns_virtual_network: Input[network.VirtualNetwork], + location: Input[str], + resource_group_name: Input[str], + shm_fqdn: Input[str], + shm_resource_group_name: Input[str], + shm_zone_name: Input[str], + sre_name: Input[str], + user_public_ip_ranges: Input[list[str]], + ) -> None: + # Other variables + self.dns_private_zones = dns_private_zones + self.dns_virtual_network_id = Output.from_input(dns_virtual_network).apply( + get_id_from_vnet + ) + self.dns_virtual_network_name = Output.from_input(dns_virtual_network).apply( + get_name_from_vnet + ) + self.dns_server_ip = dns_server_ip + self.location = location + self.resource_group_name = resource_group_name + self.shm_fqdn = shm_fqdn + self.shm_resource_group_name = shm_resource_group_name + self.shm_zone_name = shm_zone_name + self.sre_name = sre_name + self.user_public_ip_ranges = user_public_ip_ranges + + +class SRENetworkingComponent(ComponentResource): + """Deploy networking with Pulumi""" + + def __init__( + self, + name: str, + stack_name: str, + props: SRENetworkingProps, + opts: ResourceOptions | None = None, + tags: Input[Mapping[str, Input[str]]] | None = None, + ) -> None: + super().__init__("dsh:sre:NetworkingComponent", name, {}, opts) + child_opts = ResourceOptions.merge(opts, ResourceOptions(parent=self)) + child_tags = {"component": "networking"} | (tags if tags else {}) + + # Define route table + route_table = network.RouteTable( + f"{self._name}_route_table", + location=props.location, + resource_group_name=props.resource_group_name, + route_table_name=f"{stack_name}-route-table", + routes=[], + opts=ResourceOptions.merge( + child_opts, + ResourceOptions( + ignore_changes=["routes"] + ), # allow routes to be created outside this definition + ), + tags=child_tags, + ) + + # Define NSGs + nsg_application_gateway = network.NetworkSecurityGroup( + f"{self._name}_nsg_application_gateway", + location=props.location, + network_security_group_name=f"{stack_name}-nsg-application-gateway", + resource_group_name=props.resource_group_name, + security_rules=[ + # Inbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow inbound gateway management service traffic.", + destination_address_prefix="*", + destination_port_ranges=["65200-65535"], + direction=network.SecurityRuleDirection.INBOUND, + name="AllowGatewayManagerServiceInbound", + priority=NetworkingPriorities.AZURE_GATEWAY_MANAGER, + protocol=network.SecurityRuleProtocol.TCP, + source_address_prefix="GatewayManager", + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow inbound Azure load balancer traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.INBOUND, + name="AllowAzureLoadBalancerServiceInbound", + priority=NetworkingPriorities.AZURE_LOAD_BALANCER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="AzureLoadBalancer", + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow inbound connections from users over the internet.", + 
destination_address_prefix=SREIpRanges.application_gateway.prefix, + destination_port_ranges=[Ports.HTTP, Ports.HTTPS], + direction=network.SecurityRuleDirection.INBOUND, + name="AllowUsersInternetInbound", + priority=NetworkingPriorities.AUTHORISED_EXTERNAL_USER_IPS, + protocol=network.SecurityRuleProtocol.TCP, + source_address_prefixes=props.user_public_ip_ranges, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow inbound connections from from ssllabs.com for SSL quality reporting.", + destination_address_prefix=SREIpRanges.application_gateway.prefix, + destination_port_ranges=[Ports.HTTPS], + direction=network.SecurityRuleDirection.INBOUND, + name="AllowSslLabsInternetInbound", + priority=NetworkingPriorities.AUTHORISED_EXTERNAL_SSL_LABS_IPS, + protocol=network.SecurityRuleProtocol.TCP, + source_address_prefix="64.41.200.0/24", + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other inbound traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.INBOUND, + name="DenyAllOtherInbound", + priority=NetworkingPriorities.ALL_OTHER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + # Outbound + # Attempting to add our standard DenyAzurePlatformDnsOutbound rule will + # cause this NSG to fail validation. More details here: + # https://learn.microsoft.com/en-us/azure/application-gateway/configuration-infrastructure#network-security-groups + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to DNS servers.", + destination_address_prefix=SREDnsIpRanges.vnet.prefix, + destination_port_ranges=[Ports.DNS], + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowDNSServersOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_DNS_SERVERS, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.application_gateway.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to the Guacamole remote desktop gateway.", + destination_address_prefix=SREIpRanges.guacamole_containers.prefix, + destination_port_ranges=[Ports.HTTP], + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowGuacamoleContainersOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_GUACAMOLE_CONTAINERS, + protocol=network.SecurityRuleProtocol.TCP, + source_address_prefix=SREIpRanges.application_gateway.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound gateway management traffic over the internet.", + destination_address_prefix="Internet", + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowGatewayManagerInternetOutbound", + priority=NetworkingPriorities.EXTERNAL_INTERNET, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + # Attempting to add our standard DenyAllOtherOutbound rule will cause + # this NSG to fail validation. 
More details here: + # https://learn.microsoft.com/en-us/azure/application-gateway/configuration-infrastructure#network-security-groups + ], + opts=child_opts, + tags=child_tags, + ) + nsg_apt_proxy_server = network.NetworkSecurityGroup( + f"{self._name}_nsg_apt_proxy_server", + location=props.location, + network_security_group_name=f"{stack_name}-nsg-apt-proxy-server", + resource_group_name=props.resource_group_name, + security_rules=[ + # Inbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow inbound connections from SRE workspaces.", + destination_address_prefix=SREIpRanges.apt_proxy_server.prefix, + destination_port_ranges=[Ports.LINUX_UPDATE], + direction=network.SecurityRuleDirection.INBOUND, + name="AllowWorkspacesInbound", + priority=NetworkingPriorities.INTERNAL_SRE_WORKSPACES, + protocol=network.SecurityRuleProtocol.TCP, + source_address_prefix=SREIpRanges.workspaces.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other inbound traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.INBOUND, + name="DenyAllOtherInbound", + priority=NetworkingPriorities.ALL_OTHER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + # Outbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny outbound connections to Azure Platform DNS endpoints (including 168.63.129.16), which are not included in the 'Internet' service tag.", + destination_address_prefix="AzurePlatformDNS", + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="DenyAzurePlatformDnsOutbound", + priority=NetworkingPriorities.AZURE_PLATFORM_DNS, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to DNS servers.", + destination_address_prefix=SREDnsIpRanges.vnet.prefix, + destination_port_ranges=[Ports.DNS], + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowDNSServersOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_DNS_SERVERS, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.apt_proxy_server.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to configuration data endpoints.", + destination_address_prefix=SREIpRanges.data_configuration.prefix, + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowDataConfigurationEndpointsOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_DATA_CONFIGURATION, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.apt_proxy_server.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to external repositories over the internet.", + destination_address_prefix="Internet", + destination_port_ranges=[Ports.HTTP, Ports.HTTPS], + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowPackagesInternetOutbound", + priority=NetworkingPriorities.EXTERNAL_INTERNET, + protocol=network.SecurityRuleProtocol.TCP, + source_address_prefix=SREIpRanges.apt_proxy_server.prefix, + source_port_range="*", + 
), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other outbound traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="DenyAllOtherOutbound", + priority=NetworkingPriorities.ALL_OTHER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + ], + opts=child_opts, + tags=child_tags, + ) + nsg_clamav_mirror = network.NetworkSecurityGroup( + f"{self._name}_nsg_clamav_mirror", + location=props.location, + network_security_group_name=f"{stack_name}-nsg-clamav-mirror", + resource_group_name=props.resource_group_name, + security_rules=[ + # Inbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow inbound connections from SRE workspaces.", + destination_address_prefix=SREIpRanges.clamav_mirror.prefix, + destination_port_ranges=[Ports.HTTP, Ports.HTTPS, Ports.SQUID], + direction=network.SecurityRuleDirection.INBOUND, + name="AllowWorkspacesInbound", + priority=NetworkingPriorities.INTERNAL_SRE_WORKSPACES, + protocol=network.SecurityRuleProtocol.TCP, + source_address_prefix=SREIpRanges.workspaces.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other inbound traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.INBOUND, + name="DenyAllOtherInbound", + priority=NetworkingPriorities.ALL_OTHER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + # Outbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny outbound connections to Azure Platform DNS endpoints (including 168.63.129.16), which are not included in the 'Internet' service tag.", + destination_address_prefix="AzurePlatformDNS", + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="DenyAzurePlatformDnsOutbound", + priority=NetworkingPriorities.AZURE_PLATFORM_DNS, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to DNS servers.", + destination_address_prefix=SREDnsIpRanges.vnet.prefix, + destination_port_ranges=[Ports.DNS], + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowDNSServersOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_DNS_SERVERS, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.clamav_mirror.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to configuration data endpoints.", + destination_address_prefix=SREIpRanges.data_configuration.prefix, + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowDataConfigurationEndpointsOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_DATA_CONFIGURATION, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.clamav_mirror.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to ClamAV repositories over the internet.", + destination_address_prefix="Internet", + destination_port_ranges=[Ports.HTTP, 
Ports.HTTPS], + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowClamAVDefinitionsInternetOutbound", + priority=NetworkingPriorities.EXTERNAL_INTERNET, + protocol=network.SecurityRuleProtocol.TCP, + source_address_prefix=SREIpRanges.clamav_mirror.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other outbound traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="DenyAllOtherOutbound", + priority=NetworkingPriorities.ALL_OTHER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + ], + opts=child_opts, + tags=child_tags, + ) + nsg_data_configuration = network.NetworkSecurityGroup( + f"{self._name}_nsg_data_configuration", + location=props.location, + network_security_group_name=f"{stack_name}-nsg-data-configuration", + resource_group_name=props.resource_group_name, + security_rules=[ + # Inbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow inbound connections from Guacamole remote desktop gateway.", + destination_address_prefix=SREIpRanges.data_configuration.prefix, + destination_port_range="*", + direction=network.SecurityRuleDirection.INBOUND, + name="AllowGuacamoleContainersInbound", + priority=NetworkingPriorities.INTERNAL_SRE_GUACAMOLE_CONTAINERS, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.guacamole_containers.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow inbound connections from identity containers.", + destination_address_prefix=SREIpRanges.data_configuration.prefix, + destination_port_range="*", + direction=network.SecurityRuleDirection.INBOUND, + name="AllowIdentityServersInbound", + priority=NetworkingPriorities.INTERNAL_SRE_IDENTITY_CONTAINERS, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.identity_containers.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow inbound connections from user services containers.", + destination_address_prefix=SREIpRanges.data_configuration.prefix, + destination_port_range="*", + direction=network.SecurityRuleDirection.INBOUND, + name="AllowUserServicesContainersInbound", + priority=NetworkingPriorities.INTERNAL_SRE_USER_SERVICES_CONTAINERS, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.user_services_containers.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow inbound connections from user services software repositories.", + destination_address_prefix=SREIpRanges.data_configuration.prefix, + destination_port_range="*", + direction=network.SecurityRuleDirection.INBOUND, + name="AllowUserServicesSoftwareRepositoriesInbound", + priority=NetworkingPriorities.INTERNAL_SRE_USER_SERVICES_SOFTWARE_REPOSITORIES, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.user_services_software_repositories.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other inbound traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.INBOUND, + 
name="DenyAllOtherInbound", + priority=NetworkingPriorities.ALL_OTHER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + # Outbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny outbound connections to Azure Platform DNS endpoints (including 168.63.129.16), which are not included in the 'Internet' service tag.", + destination_address_prefix="AzurePlatformDNS", + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="DenyAzurePlatformDnsOutbound", + priority=NetworkingPriorities.AZURE_PLATFORM_DNS, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other outbound traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="DenyAllOtherOutbound", + priority=NetworkingPriorities.ALL_OTHER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + ], + opts=child_opts, + tags=child_tags, + ) + nsg_data_desired_state = network.NetworkSecurityGroup( + f"{self._name}_nsg_data_desired_state", + location=props.location, + network_security_group_name=f"{stack_name}-nsg-data-desired-state", + resource_group_name=props.resource_group_name, + security_rules=[ + # Inbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow inbound connections from workspaces.", + destination_address_prefix=SREIpRanges.data_configuration.prefix, + destination_port_range="*", + direction=network.SecurityRuleDirection.INBOUND, + name="AllowWorkspacesInbound", + priority=NetworkingPriorities.INTERNAL_SRE_WORKSPACES, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.workspaces.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other inbound traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.INBOUND, + name="DenyAllOtherInbound", + priority=NetworkingPriorities.ALL_OTHER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + # Outbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny outbound connections to Azure Platform DNS endpoints (including 168.63.129.16), which are not included in the 'Internet' service tag.", + destination_address_prefix="AzurePlatformDNS", + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="DenyAzurePlatformDnsOutbound", + priority=NetworkingPriorities.AZURE_PLATFORM_DNS, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other outbound traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="DenyAllOtherOutbound", + priority=NetworkingPriorities.ALL_OTHER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + ], + opts=child_opts, + tags=child_tags, + ) + nsg_data_private = network.NetworkSecurityGroup( + f"{self._name}_nsg_data_private", + 
location=props.location, + network_security_group_name=f"{stack_name}-nsg-data-private", + resource_group_name=props.resource_group_name, + security_rules=[ + # Inbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow inbound connections from SRE workspaces.", + destination_address_prefix=SREIpRanges.data_private.prefix, + destination_port_range="*", + direction=network.SecurityRuleDirection.INBOUND, + name="AllowWorkspacesInbound", + priority=NetworkingPriorities.INTERNAL_SRE_WORKSPACES, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.workspaces.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other inbound traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.INBOUND, + name="DenyAllOtherInbound", + priority=NetworkingPriorities.ALL_OTHER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + # Outbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny outbound connections to Azure Platform DNS endpoints (including 168.63.129.16), which are not included in the 'Internet' service tag.", + destination_address_prefix="AzurePlatformDNS", + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="DenyAzurePlatformDnsOutbound", + priority=NetworkingPriorities.AZURE_PLATFORM_DNS, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other outbound traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="DenyAllOtherOutbound", + priority=NetworkingPriorities.ALL_OTHER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + ], + opts=child_opts, + tags=child_tags, + ) + nsg_guacamole_containers = network.NetworkSecurityGroup( + f"{self._name}_nsg_guacamole_containers", + location=props.location, + network_security_group_name=f"{stack_name}-nsg-guacamole-containers", + resource_group_name=props.resource_group_name, + security_rules=[ + # Inbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow inbound connections from the Application Gateway.", + destination_address_prefix=SREIpRanges.guacamole_containers.prefix, + destination_port_ranges=[Ports.HTTP], + direction=network.SecurityRuleDirection.INBOUND, + name="AllowApplicationGatewayInbound", + priority=NetworkingPriorities.INTERNAL_SRE_APPLICATION_GATEWAY, + protocol=network.SecurityRuleProtocol.TCP, + source_address_prefix=SREIpRanges.application_gateway.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other inbound traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.INBOUND, + name="DenyAllOtherInbound", + priority=NetworkingPriorities.ALL_OTHER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + # Outbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny outbound connections to Azure Platform DNS endpoints (including 
168.63.129.16), which are not included in the 'Internet' service tag.", + destination_address_prefix="AzurePlatformDNS", + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="DenyAzurePlatformDnsOutbound", + priority=NetworkingPriorities.AZURE_PLATFORM_DNS, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to DNS servers.", + destination_address_prefix=SREDnsIpRanges.vnet.prefix, + destination_port_ranges=[Ports.DNS], + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowDNSServersOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_DNS_SERVERS, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.guacamole_containers.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to configuration data endpoints.", + destination_address_prefix=SREIpRanges.data_configuration.prefix, + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowDataConfigurationEndpointsOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_DATA_CONFIGURATION, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.guacamole_containers.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to Guacamole support services.", + destination_address_prefix=SREIpRanges.guacamole_containers_support.prefix, + destination_port_ranges=[Ports.POSTGRESQL], + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowGuacamoleContainersSupportOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_GUACAMOLE_CONTAINERS_SUPPORT, + protocol=network.SecurityRuleProtocol.TCP, + source_address_prefix=SREIpRanges.guacamole_containers.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow LDAP client requests over TCP.", + destination_address_prefix=SREIpRanges.identity_containers.prefix, + destination_port_ranges=[Ports.LDAP_APRICOT], + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowIdentityServersOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_IDENTITY_CONTAINERS, + protocol=network.SecurityRuleProtocol.TCP, + source_address_prefix=SREIpRanges.guacamole_containers.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to SRE workspaces.", + destination_address_prefix=SREIpRanges.workspaces.prefix, + destination_port_ranges=[Ports.SSH, Ports.RDP], + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowWorkspacesOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_WORKSPACES, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.guacamole_containers.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound OAuth connections over the internet.", + destination_address_prefix="Internet", + destination_port_ranges=[Ports.HTTP, Ports.HTTPS], + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowOAuthInternetOutbound", + priority=NetworkingPriorities.EXTERNAL_INTERNET, + protocol=network.SecurityRuleProtocol.TCP, 
+ source_address_prefix=SREIpRanges.guacamole_containers.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other outbound traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="DenyAllOtherOutbound", + priority=NetworkingPriorities.ALL_OTHER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + ], + opts=child_opts, + tags=child_tags, + ) + nsg_guacamole_containers_support = network.NetworkSecurityGroup( + f"{self._name}_nsg_guacamole_containers_support", + location=props.location, + network_security_group_name=f"{stack_name}-nsg-guacamole-containers-support", + resource_group_name=props.resource_group_name, + security_rules=[ + # Inbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow inbound connections from Guacamole remote desktop gateway.", + destination_address_prefix=SREIpRanges.guacamole_containers_support.prefix, + destination_port_range="*", + direction=network.SecurityRuleDirection.INBOUND, + name="AllowGuacamoleContainersInbound", + priority=NetworkingPriorities.INTERNAL_SRE_GUACAMOLE_CONTAINERS, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.guacamole_containers.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other inbound traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.INBOUND, + name="DenyAllOtherInbound", + priority=NetworkingPriorities.ALL_OTHER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + # Outbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny outbound connections to Azure Platform DNS endpoints (including 168.63.129.16), which are not included in the 'Internet' service tag.", + destination_address_prefix="AzurePlatformDNS", + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="DenyAzurePlatformDnsOutbound", + priority=NetworkingPriorities.AZURE_PLATFORM_DNS, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other outbound traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="DenyAllOtherOutbound", + priority=NetworkingPriorities.ALL_OTHER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + ], + opts=child_opts, + tags=child_tags, + ) + nsg_identity_containers = network.NetworkSecurityGroup( + f"{self._name}_nsg_identity_containers", + location=props.location, + network_security_group_name=f"{stack_name}-nsg-identity-containers", + resource_group_name=props.resource_group_name, + security_rules=[ + # Inbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow LDAP client requests from Guacamole over TCP.", + destination_address_prefix=SREIpRanges.identity_containers.prefix, + destination_port_ranges=[Ports.LDAP_APRICOT], + direction=network.SecurityRuleDirection.INBOUND, + name="AllowGuacamoleLDAPClientTCPInbound", + 
priority=NetworkingPriorities.INTERNAL_SRE_GUACAMOLE_CONTAINERS, + protocol=network.SecurityRuleProtocol.TCP, + source_address_prefix=SREIpRanges.guacamole_containers.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow LDAP client requests from user services over TCP.", + destination_address_prefix=SREIpRanges.identity_containers.prefix, + destination_port_ranges=[Ports.LDAP_APRICOT], + direction=network.SecurityRuleDirection.INBOUND, + name="AllowUserServicesLDAPClientTCPInbound", + priority=NetworkingPriorities.INTERNAL_SRE_USER_SERVICES_CONTAINERS, + protocol=network.SecurityRuleProtocol.TCP, + source_address_prefix=SREIpRanges.user_services_containers.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow LDAP client requests from workspaces over TCP.", + destination_address_prefix=SREIpRanges.identity_containers.prefix, + destination_port_ranges=[Ports.LDAP_APRICOT], + direction=network.SecurityRuleDirection.INBOUND, + name="AllowWorkspaceLDAPClientTCPInbound", + priority=NetworkingPriorities.INTERNAL_SRE_WORKSPACES, + protocol=network.SecurityRuleProtocol.TCP, + source_address_prefix=SREIpRanges.workspaces.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other inbound traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.INBOUND, + name="DenyAllOtherInbound", + priority=NetworkingPriorities.ALL_OTHER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + # Outbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny outbound connections to Azure Platform DNS endpoints (including 168.63.129.16), which are not included in the 'Internet' service tag.", + destination_address_prefix="AzurePlatformDNS", + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="DenyAzurePlatformDnsOutbound", + priority=NetworkingPriorities.AZURE_PLATFORM_DNS, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to DNS servers.", + destination_address_prefix=SREDnsIpRanges.vnet.prefix, + destination_port_ranges=[Ports.DNS], + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowDNSServersOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_DNS_SERVERS, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.identity_containers.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to configuration data endpoints.", + destination_address_prefix=SREIpRanges.data_configuration.prefix, + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowDataConfigurationEndpointsOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_DATA_CONFIGURATION, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.identity_containers.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound OAuth connections over the internet.", + 
destination_address_prefix="Internet", + destination_port_ranges=[Ports.HTTP, Ports.HTTPS], + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowOAuthInternetOutbound", + priority=NetworkingPriorities.EXTERNAL_INTERNET, + protocol=network.SecurityRuleProtocol.TCP, + source_address_prefix=SREIpRanges.identity_containers.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other outbound traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="DenyAllOtherOutbound", + priority=NetworkingPriorities.ALL_OTHER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + ], + opts=child_opts, + tags=child_tags, + ) + nsg_monitoring = network.NetworkSecurityGroup( + f"{self._name}_nsg_monitoring", + location=props.location, + network_security_group_name=f"{stack_name}-nsg-monitoring", + resource_group_name=props.resource_group_name, + security_rules=[ + # Inbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow inbound connections from own subnet.", + destination_address_prefix=SREIpRanges.monitoring.prefix, + destination_port_range="*", + direction=network.SecurityRuleDirection.INBOUND, + name="AllowMonitoringToolsInbound", + priority=NetworkingPriorities.INTERNAL_SRE_SELF, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.monitoring.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow inbound connections from workspaces.", + destination_address_prefix=SREIpRanges.monitoring.prefix, + destination_port_ranges=[Ports.HTTPS], + direction=network.SecurityRuleDirection.INBOUND, + name="AllowWorkspacesInbound", + priority=NetworkingPriorities.INTERNAL_SRE_WORKSPACES, + protocol=network.SecurityRuleProtocol.TCP, + source_address_prefix=SREIpRanges.workspaces.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other inbound traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.INBOUND, + name="DenyAllOtherInbound", + priority=NetworkingPriorities.ALL_OTHER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + # Outbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to own subnet.", + destination_address_prefix=SREIpRanges.monitoring.prefix, + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowMonitoringToolsOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_SELF, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.monitoring.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to workspaces.", + destination_address_prefix=SREIpRanges.workspaces.prefix, + destination_port_ranges=[Ports.AZURE_MONITORING], + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowWorkspacesOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_WORKSPACES, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.monitoring.prefix, + source_port_range="*", + ), + 
network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other outbound traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="DenyAllOtherOutbound", + priority=NetworkingPriorities.ALL_OTHER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + ], + opts=child_opts, + tags=child_tags, + ) + nsg_user_services_containers = network.NetworkSecurityGroup( + f"{self._name}_nsg_user_services_containers", + location=props.location, + network_security_group_name=f"{stack_name}-nsg-user-services-containers", + resource_group_name=props.resource_group_name, + security_rules=[ + # Inbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow inbound connections from SRE workspaces.", + destination_address_prefix=SREIpRanges.user_services_containers.prefix, + destination_port_ranges=[Ports.SSH, Ports.HTTP, Ports.HTTPS], + direction=network.SecurityRuleDirection.INBOUND, + name="AllowWorkspacesInbound", + priority=NetworkingPriorities.INTERNAL_SRE_WORKSPACES, + protocol=network.SecurityRuleProtocol.TCP, + source_address_prefix=SREIpRanges.workspaces.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other inbound traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.INBOUND, + name="DenyAllOtherInbound", + priority=NetworkingPriorities.ALL_OTHER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + # Outbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny outbound connections to Azure Platform DNS endpoints (including 168.63.129.16), which are not included in the 'Internet' service tag.", + destination_address_prefix="AzurePlatformDNS", + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="DenyAzurePlatformDnsOutbound", + priority=NetworkingPriorities.AZURE_PLATFORM_DNS, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to DNS servers.", + destination_address_prefix=SREDnsIpRanges.vnet.prefix, + destination_port_ranges=[Ports.DNS], + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowDNSServersOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_DNS_SERVERS, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.user_services_containers.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to configuration data endpoints.", + destination_address_prefix=SREIpRanges.data_configuration.prefix, + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowDataConfigurationEndpointsOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_DATA_CONFIGURATION, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.user_services_containers.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow LDAP client requests over TCP.", + 
destination_address_prefix=SREIpRanges.identity_containers.prefix, + destination_port_ranges=[Ports.LDAP_APRICOT], + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowIdentityServersOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_IDENTITY_CONTAINERS, + protocol=network.SecurityRuleProtocol.TCP, + source_address_prefix=SREIpRanges.user_services_containers.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to container support services.", + destination_address_prefix=SREIpRanges.user_services_containers_support.prefix, + destination_port_ranges=[Ports.POSTGRESQL], + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowUserServicesContainersSupportOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_USER_SERVICES_CONTAINERS_SUPPORT, + protocol=network.SecurityRuleProtocol.TCP, + source_address_prefix=SREIpRanges.user_services_containers.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other outbound traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="DenyAllOtherOutbound", + priority=NetworkingPriorities.ALL_OTHER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + ], + opts=child_opts, + tags=child_tags, + ) + nsg_user_services_containers_support = network.NetworkSecurityGroup( + f"{self._name}_nsg_user_services_containers_support", + location=props.location, + network_security_group_name=f"{stack_name}-nsg-user-services-containers-support", + resource_group_name=props.resource_group_name, + security_rules=[ + # Inbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow inbound connections from user services containers.", + destination_address_prefix=SREIpRanges.user_services_containers_support.prefix, + destination_port_ranges=[Ports.POSTGRESQL], + direction=network.SecurityRuleDirection.INBOUND, + name="AllowUserServicesContainersInbound", + priority=NetworkingPriorities.INTERNAL_SRE_USER_SERVICES_CONTAINERS, + protocol=network.SecurityRuleProtocol.TCP, + source_address_prefix=SREIpRanges.user_services_containers.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other inbound traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.INBOUND, + name="DenyAllOtherInbound", + priority=NetworkingPriorities.ALL_OTHER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + # Outbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny outbound connections to Azure Platform DNS endpoints (including 168.63.129.16), which are not included in the 'Internet' service tag.", + destination_address_prefix="AzurePlatformDNS", + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="DenyAzurePlatformDnsOutbound", + priority=NetworkingPriorities.AZURE_PLATFORM_DNS, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other outbound traffic.", + destination_address_prefix="*", + 
destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="DenyAllOtherOutbound", + priority=NetworkingPriorities.ALL_OTHER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + ], + opts=child_opts, + tags=child_tags, + ) + nsg_user_services_databases = network.NetworkSecurityGroup( + f"{self._name}_nsg_user_services_databases", + location=props.location, + network_security_group_name=f"{stack_name}-nsg-user-services-databases", + resource_group_name=props.resource_group_name, + security_rules=[ + # Inbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow inbound connections from SRE workspaces.", + destination_address_prefix=SREIpRanges.user_services_databases.prefix, + destination_port_ranges=[Ports.MSSQL, Ports.POSTGRESQL], + direction=network.SecurityRuleDirection.INBOUND, + name="AllowWorkspacesInbound", + priority=NetworkingPriorities.INTERNAL_SRE_WORKSPACES, + protocol=network.SecurityRuleProtocol.TCP, + source_address_prefix=SREIpRanges.workspaces.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other inbound traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.INBOUND, + name="DenyAllOtherInbound", + priority=NetworkingPriorities.ALL_OTHER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + # Outbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny outbound connections to Azure Platform DNS endpoints (including 168.63.129.16), which are not included in the 'Internet' service tag.", + destination_address_prefix="AzurePlatformDNS", + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="DenyAzurePlatformDnsOutbound", + priority=NetworkingPriorities.AZURE_PLATFORM_DNS, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to DNS servers.", + destination_address_prefix=SREDnsIpRanges.vnet.prefix, + destination_port_ranges=[Ports.DNS], + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowDNSServersOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_DNS_SERVERS, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.user_services_databases.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to configuration data endpoints.", + destination_address_prefix=SREIpRanges.data_configuration.prefix, + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowDataConfigurationEndpointsOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_DATA_CONFIGURATION, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.user_services_databases.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other outbound traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="DenyAllOtherOutbound", + priority=NetworkingPriorities.ALL_OTHER, + 
protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + ], + opts=child_opts, + tags=child_tags, + ) + nsg_user_services_software_repositories = network.NetworkSecurityGroup( + f"{self._name}_nsg_user_services_software_repositories", + location=props.location, + network_security_group_name=f"{stack_name}-nsg-user-services-software-repositories", + resource_group_name=props.resource_group_name, + security_rules=[ + # Inbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow inbound connections from SRE workspaces.", + destination_address_prefix=SREIpRanges.user_services_software_repositories.prefix, + destination_port_ranges=[Ports.HTTP, Ports.HTTPS, Ports.SQUID], + direction=network.SecurityRuleDirection.INBOUND, + name="AllowWorkspacesInbound", + priority=NetworkingPriorities.INTERNAL_SRE_WORKSPACES, + protocol=network.SecurityRuleProtocol.TCP, + source_address_prefix=SREIpRanges.workspaces.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other inbound traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.INBOUND, + name="DenyAllOtherInbound", + priority=NetworkingPriorities.ALL_OTHER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + # Outbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny outbound connections to Azure Platform DNS endpoints (including 168.63.129.16), which are not included in the 'Internet' service tag.", + destination_address_prefix="AzurePlatformDNS", + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="DenyAzurePlatformDnsOutbound", + priority=NetworkingPriorities.AZURE_PLATFORM_DNS, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to DNS servers.", + destination_address_prefix=SREDnsIpRanges.vnet.prefix, + destination_port_ranges=[Ports.DNS], + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowDNSServersOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_DNS_SERVERS, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.user_services_software_repositories.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to configuration data endpoints.", + destination_address_prefix=SREIpRanges.data_configuration.prefix, + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowDataConfigurationEndpointsOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_DATA_CONFIGURATION, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.user_services_software_repositories.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to external repositories over the internet.", + destination_address_prefix="Internet", + destination_port_ranges=[Ports.HTTP, Ports.HTTPS], + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowPackagesInternetOutbound", + priority=NetworkingPriorities.EXTERNAL_INTERNET, + 
protocol=network.SecurityRuleProtocol.TCP, + source_address_prefix=SREIpRanges.user_services_software_repositories.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other outbound traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="DenyAllOtherOutbound", + priority=NetworkingPriorities.ALL_OTHER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + ], + opts=child_opts, + tags=child_tags, + ) + nsg_workspaces = network.NetworkSecurityGroup( + f"{self._name}_nsg_workspaces", + location=props.location, + network_security_group_name=f"{stack_name}-nsg-workspaces", + resource_group_name=props.resource_group_name, + security_rules=[ + # Inbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow inbound connections from monitoring tools.", + destination_address_prefix=SREIpRanges.workspaces.prefix, + destination_port_ranges=[Ports.AZURE_MONITORING], + direction=network.SecurityRuleDirection.INBOUND, + name="AllowMonitoringToolsInbound", + priority=NetworkingPriorities.AZURE_MONITORING_SOURCES, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.monitoring.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow inbound connections from Guacamole remote desktop gateway.", + destination_address_prefix=SREIpRanges.workspaces.prefix, + destination_port_ranges=[Ports.SSH, Ports.RDP], + direction=network.SecurityRuleDirection.INBOUND, + name="AllowGuacamoleContainersInbound", + priority=NetworkingPriorities.INTERNAL_SRE_GUACAMOLE_CONTAINERS, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.guacamole_containers.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other inbound traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.INBOUND, + name="DenyAllOtherInbound", + priority=NetworkingPriorities.ALL_OTHER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + # Outbound + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny outbound connections to Azure Platform DNS endpoints (including 168.63.129.16), which are not included in the 'Internet' service tag.", + destination_address_prefix="AzurePlatformDNS", + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="DenyAzurePlatformDnsOutbound", + priority=NetworkingPriorities.AZURE_PLATFORM_DNS, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to ClamAV mirror.", + destination_address_prefix=SREIpRanges.clamav_mirror.prefix, + destination_port_ranges=[Ports.HTTP], + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowClamAVMirrorOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_CLAMAV_MIRROR, + protocol=network.SecurityRuleProtocol.TCP, + source_address_prefix=SREIpRanges.workspaces.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + 
description="Allow LDAP client requests over TCP.", + destination_address_prefix=SREIpRanges.identity_containers.prefix, + destination_port_ranges=[Ports.LDAP_APRICOT], + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowIdentityServersOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_IDENTITY_CONTAINERS, + protocol=network.SecurityRuleProtocol.TCP, + source_address_prefix=SREIpRanges.workspaces.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to DNS servers.", + destination_address_prefix=SREDnsIpRanges.vnet.prefix, + destination_port_ranges=[Ports.DNS], + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowDNSServersOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_DNS_SERVERS, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.workspaces.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to private data endpoints.", + destination_address_prefix=SREIpRanges.data_private.prefix, + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowDataPrivateEndpointsOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_DATA_PRIVATE, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.workspaces.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to desired state data endpoints.", + destination_address_prefix=SREIpRanges.data_desired_state.prefix, + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowDataDesiredStateEndpointsOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_DATA_DESIRED_STATE, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix=SREIpRanges.workspaces.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to monitoring tools.", + destination_address_prefix=SREIpRanges.monitoring.prefix, + destination_port_ranges=[Ports.HTTPS], + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowMonitoringToolsOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_MONITORING_TOOLS, + protocol=network.SecurityRuleProtocol.TCP, + source_address_prefix=SREIpRanges.workspaces.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to user services containers.", + destination_address_prefix=SREIpRanges.user_services_containers.prefix, + destination_port_ranges=[Ports.SSH, Ports.HTTP, Ports.HTTPS], + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowUserServicesContainersOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_USER_SERVICES_CONTAINERS, + protocol=network.SecurityRuleProtocol.TCP, + source_address_prefix=SREIpRanges.workspaces.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to user services databases.", + destination_address_prefix=SREIpRanges.user_services_databases.prefix, + destination_port_ranges=[Ports.MSSQL, Ports.POSTGRESQL], + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowUserServicesDatabasesOutbound", + 
priority=NetworkingPriorities.INTERNAL_SRE_USER_SERVICES_DATABASES, + protocol=network.SecurityRuleProtocol.TCP, + source_address_prefix=SREIpRanges.workspaces.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to user services software repositories.", + destination_address_prefix=SREIpRanges.user_services_software_repositories.prefix, + destination_port_ranges=[Ports.HTTP, Ports.HTTPS, Ports.SQUID], + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowUserServicesSoftwareRepositoriesOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_USER_SERVICES_SOFTWARE_REPOSITORIES, + protocol=network.SecurityRuleProtocol.TCP, + source_address_prefix=SREIpRanges.workspaces.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound connections to apt proxy server.", + destination_address_prefix=SREIpRanges.apt_proxy_server.prefix, + destination_port_ranges=[Ports.LINUX_UPDATE], + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowAptProxyServerOutbound", + priority=NetworkingPriorities.INTERNAL_SRE_APT_PROXY_SERVER, + protocol=network.SecurityRuleProtocol.TCP, + source_address_prefix=SREIpRanges.workspaces.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.ALLOW, + description="Allow outbound configuration traffic over the internet.", + destination_address_prefix="Internet", + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="AllowConfigurationInternetOutbound", + priority=NetworkingPriorities.EXTERNAL_INTERNET, + protocol=network.SecurityRuleProtocol.TCP, + source_address_prefix=SREIpRanges.workspaces.prefix, + source_port_range="*", + ), + network.SecurityRuleArgs( + access=network.SecurityRuleAccess.DENY, + description="Deny all other outbound traffic.", + destination_address_prefix="*", + destination_port_range="*", + direction=network.SecurityRuleDirection.OUTBOUND, + name="DenyAllOtherOutbound", + priority=NetworkingPriorities.ALL_OTHER, + protocol=network.SecurityRuleProtocol.ASTERISK, + source_address_prefix="*", + source_port_range="*", + ), + ], + opts=child_opts, + tags=child_tags, + ) + + # Define the virtual network and its subnets + # Note that these names for AzureFirewall subnets are required by Azure + subnet_application_gateway_name = "ApplicationGatewaySubnet" + subnet_apt_proxy_server_name = "AptProxyServerSubnet" + subnet_clamav_mirror_name = "ClamAVMirrorSubnet" + subnet_data_configuration_name = "DataConfigurationSubnet" + subnet_data_desired_state_name = "DataDesiredStateSubnet" + subnet_data_private_name = "DataPrivateSubnet" + subnet_firewall_name = "AzureFirewallSubnet" + subnet_firewall_management_name = "AzureFirewallManagementSubnet" + subnet_guacamole_containers_name = "GuacamoleContainersSubnet" + subnet_guacamole_containers_support_name = "GuacamoleContainersSupportSubnet" + subnet_identity_containers_name = "IdentityContainersSubnet" + subnet_monitoring_name = "MonitoringSubnet" + subnet_user_services_containers_name = "UserServicesContainersSubnet" + subnet_user_services_containers_support_name = ( + "UserServicesContainersSupportSubnet" + ) + subnet_user_services_databases_name = "UserServicesDatabasesSubnet" + subnet_user_services_software_repositories_name = ( + "UserServicesSoftwareRepositoriesSubnet" + ) + subnet_workspaces_name = "WorkspacesSubnet" + sre_virtual_network 
= network.VirtualNetwork( + f"{self._name}_virtual_network", + address_space=network.AddressSpaceArgs( + address_prefixes=[SREIpRanges.vnet.prefix], + ), + dhcp_options=network.DhcpOptionsArgs(dns_servers=[props.dns_server_ip]), + location=props.location, + resource_group_name=props.resource_group_name, + # Note that we define subnets inline to avoid creation order issues + subnets=[ + # Application gateway subnet + network.SubnetArgs( + address_prefix=SREIpRanges.application_gateway.prefix, + name=subnet_application_gateway_name, + network_security_group=network.NetworkSecurityGroupArgs( + id=nsg_application_gateway.id + ), + route_table=None, # the application gateway must not go via the firewall + ), + # apt proxy server + network.SubnetArgs( + address_prefix=SREIpRanges.apt_proxy_server.prefix, + delegations=[ + network.DelegationArgs( + name="SubnetDelegationContainerGroups", + service_name="Microsoft.ContainerInstance/containerGroups", + type="Microsoft.Network/virtualNetworks/subnets/delegations", + ), + ], + name=subnet_apt_proxy_server_name, + network_security_group=network.NetworkSecurityGroupArgs( + id=nsg_apt_proxy_server.id + ), + route_table=network.RouteTableArgs(id=route_table.id), + ), + # ClamAV mirror + network.SubnetArgs( + address_prefix=SREIpRanges.clamav_mirror.prefix, + delegations=[ + network.DelegationArgs( + name="SubnetDelegationContainerGroups", + service_name="Microsoft.ContainerInstance/containerGroups", + type="Microsoft.Network/virtualNetworks/subnets/delegations", + ), + ], + name=subnet_clamav_mirror_name, + network_security_group=network.NetworkSecurityGroupArgs( + id=nsg_clamav_mirror.id + ), + route_table=network.RouteTableArgs(id=route_table.id), + ), + # Configuration data subnet + network.SubnetArgs( + address_prefix=SREIpRanges.data_configuration.prefix, + name=subnet_data_configuration_name, + network_security_group=network.NetworkSecurityGroupArgs( + id=nsg_data_configuration.id + ), + route_table=network.RouteTableArgs(id=route_table.id), + service_endpoints=[ + network.ServiceEndpointPropertiesFormatArgs( + locations=[props.location], + service="Microsoft.Storage", + ) + ], + ), + # Desired state data subnet + network.SubnetArgs( + address_prefix=SREIpRanges.data_desired_state.prefix, + name=subnet_data_desired_state_name, + network_security_group=network.NetworkSecurityGroupArgs( + id=nsg_data_desired_state.id + ), + route_table=network.RouteTableArgs(id=route_table.id), + service_endpoints=[ + network.ServiceEndpointPropertiesFormatArgs( + locations=[props.location], + service="Microsoft.Storage", + ) + ], + ), + # Private data subnet + network.SubnetArgs( + address_prefix=SREIpRanges.data_private.prefix, + name=subnet_data_private_name, + network_security_group=network.NetworkSecurityGroupArgs( + id=nsg_data_private.id + ), + route_table=network.RouteTableArgs(id=route_table.id), + service_endpoints=[ + network.ServiceEndpointPropertiesFormatArgs( + locations=[props.location], + service="Microsoft.Storage", + ) + ], + ), + # Firewall + network.SubnetArgs( + address_prefix=SREIpRanges.firewall.prefix, + name=subnet_firewall_name, + # Note that NSGs cannot be attached to a subnet containing a firewall + ), + # Firewall management + network.SubnetArgs( + address_prefix=SREIpRanges.firewall_management.prefix, + name=subnet_firewall_management_name, + # Note that NSGs cannot be attached to a subnet containing a firewall + ), + # Guacamole containers + network.SubnetArgs( + address_prefix=SREIpRanges.guacamole_containers.prefix, + delegations=[ + 
network.DelegationArgs( + name="SubnetDelegationContainerGroups", + service_name="Microsoft.ContainerInstance/containerGroups", + type="Microsoft.Network/virtualNetworks/subnets/delegations", + ), + ], + name=subnet_guacamole_containers_name, + network_security_group=network.NetworkSecurityGroupArgs( + id=nsg_guacamole_containers.id + ), + route_table=network.RouteTableArgs(id=route_table.id), + ), + # Guacamole containers support + network.SubnetArgs( + address_prefix=SREIpRanges.guacamole_containers_support.prefix, + name=subnet_guacamole_containers_support_name, + network_security_group=network.NetworkSecurityGroupArgs( + id=nsg_guacamole_containers_support.id + ), + private_endpoint_network_policies=network.VirtualNetworkPrivateEndpointNetworkPolicies.ENABLED, + route_table=network.RouteTableArgs(id=route_table.id), + ), + # Identity containers + network.SubnetArgs( + address_prefix=SREIpRanges.identity_containers.prefix, + delegations=[ + network.DelegationArgs( + name="SubnetDelegationContainerGroups", + service_name="Microsoft.ContainerInstance/containerGroups", + type="Microsoft.Network/virtualNetworks/subnets/delegations", + ), + ], + name=subnet_identity_containers_name, + network_security_group=network.NetworkSecurityGroupArgs( + id=nsg_identity_containers.id + ), + route_table=network.RouteTableArgs(id=route_table.id), + ), + # Monitoring + network.SubnetArgs( + address_prefix=SREIpRanges.monitoring.prefix, + name=subnet_monitoring_name, + network_security_group=network.NetworkSecurityGroupArgs( + id=nsg_monitoring.id + ), + route_table=network.RouteTableArgs(id=route_table.id), + ), + # User services containers + network.SubnetArgs( + address_prefix=SREIpRanges.user_services_containers.prefix, + delegations=[ + network.DelegationArgs( + name="SubnetDelegationContainerGroups", + service_name="Microsoft.ContainerInstance/containerGroups", + type="Microsoft.Network/virtualNetworks/subnets/delegations", + ), + ], + name=subnet_user_services_containers_name, + network_security_group=network.NetworkSecurityGroupArgs( + id=nsg_user_services_containers.id + ), + route_table=network.RouteTableArgs(id=route_table.id), + ), + # User services containers support + network.SubnetArgs( + address_prefix=SREIpRanges.user_services_containers_support.prefix, + name=subnet_user_services_containers_support_name, + network_security_group=network.NetworkSecurityGroupArgs( + id=nsg_user_services_containers_support.id + ), + route_table=network.RouteTableArgs(id=route_table.id), + ), + # User services databases + network.SubnetArgs( + address_prefix=SREIpRanges.user_services_databases.prefix, + name=subnet_user_services_databases_name, + network_security_group=network.NetworkSecurityGroupArgs( + id=nsg_user_services_databases.id + ), + route_table=network.RouteTableArgs(id=route_table.id), + ), + # User services software repositories + network.SubnetArgs( + address_prefix=SREIpRanges.user_services_software_repositories.prefix, + delegations=[ + network.DelegationArgs( + name="SubnetDelegationContainerGroups", + service_name="Microsoft.ContainerInstance/containerGroups", + type="Microsoft.Network/virtualNetworks/subnets/delegations", + ), + ], + name=subnet_user_services_software_repositories_name, + network_security_group=network.NetworkSecurityGroupArgs( + id=nsg_user_services_software_repositories.id + ), + route_table=network.RouteTableArgs(id=route_table.id), + ), + # Workspaces + network.SubnetArgs( + address_prefix=SREIpRanges.workspaces.prefix, + name=subnet_workspaces_name, + 
network_security_group=network.NetworkSecurityGroupArgs( + id=nsg_workspaces.id + ), + route_table=network.RouteTableArgs(id=route_table.id), + ), + ], + virtual_network_name=f"{stack_name}-vnet", + virtual_network_peerings=[], + opts=ResourceOptions.merge( + child_opts, + ResourceOptions( + ignore_changes=["virtual_network_peerings"] + ), # allow peering to SHM virtual network + ), + tags=child_tags, + ) + + # Peer the SRE virtual network to the DNS virtual network + network.VirtualNetworkPeering( + f"{self._name}_sre_to_dns_peering", + remote_virtual_network=network.SubResourceArgs( + id=props.dns_virtual_network_id + ), + resource_group_name=props.resource_group_name, + virtual_network_name=sre_virtual_network.name, + virtual_network_peering_name=Output.concat( + "peer_sre_", props.sre_name, "_to_dns" + ), + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=sre_virtual_network) + ), + ) + network.VirtualNetworkPeering( + f"{self._name}_dns_to_sre_peering", + remote_virtual_network=network.SubResourceArgs(id=sre_virtual_network.id), + resource_group_name=props.resource_group_name, + virtual_network_name=props.dns_virtual_network_name, + virtual_network_peering_name=Output.concat( + "peer_dns_to_sre_", props.sre_name + ), + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=sre_virtual_network) + ), + ) + + # Define SRE DNS zone + shm_dns_zone = Output.all( + resource_group_name=props.shm_resource_group_name, + zone_name=props.shm_zone_name, + ).apply( + lambda kwargs: network.get_zone( + resource_group_name=kwargs["resource_group_name"], + zone_name=kwargs["zone_name"], + ) + ) + sre_subdomain = Output.from_input(props.sre_name).apply( + lambda name: alphanumeric(name).lower() + ) + sre_fqdn = Output.concat(sre_subdomain, ".", props.shm_fqdn) + sre_dns_zone = network.Zone( + f"{self._name}_dns_zone", + location="Global", + resource_group_name=props.resource_group_name, + zone_name=sre_fqdn, + zone_type=network.ZoneType.PUBLIC, + opts=child_opts, + tags=child_tags, + ) + shm_ns_record = network.RecordSet( + f"{self._name}_ns_record", + ns_records=sre_dns_zone.name_servers.apply( + lambda servers: [network.NsRecordArgs(nsdname=ns) for ns in servers] + ), + record_type="NS", + relative_record_set_name=sre_subdomain, + resource_group_name=props.shm_resource_group_name, + ttl=3600, + zone_name=shm_dns_zone.name, + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=sre_dns_zone) + ), + ) + network.RecordSet( + f"{self._name}_caa_record", + caa_records=[ + network.CaaRecordArgs( + flags=0, + tag="issue", + value="letsencrypt.org", + ) + ], + record_type="CAA", + relative_record_set_name="@", + resource_group_name=props.resource_group_name, + ttl=30, + zone_name=sre_dns_zone.name, + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=sre_dns_zone) + ), + ) + + # Define SRE internal DNS zone + sre_private_dns_zone = network.PrivateZone( + f"{self._name}_private_zone", + location="Global", + private_zone_name=Output.concat("privatelink.", sre_fqdn), + resource_group_name=props.resource_group_name, + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=sre_dns_zone) + ), + tags=child_tags, + ) + + # Link SRE private DNS zone to DNS virtual network + network.VirtualNetworkLink( + f"{self._name}_private_zone_internal_vnet_link", + location="Global", + private_zone_name=sre_private_dns_zone.name, + registration_enabled=False, + resource_group_name=props.resource_group_name, + 
virtual_network=network.SubResourceArgs(id=props.dns_virtual_network_id),
+            virtual_network_link_name=Output.concat(
+                "link-to-", props.dns_virtual_network_name
+            ),
+            opts=ResourceOptions.merge(
+                child_opts, ResourceOptions(parent=sre_private_dns_zone)
+            ),
+        )
+
+        # Link Azure private DNS zones to virtual network
+        # Note that although the DNS virtual network is already linked to these zones,
+        # Azure Container Instances do not have an IP address during deployment and so
+        # must use default Azure DNS when setting up file mounts. This means that we
+        # need to be able to resolve the "Storage Account" private DNS zones.
+        for dns_zone_name, private_dns_zone in props.dns_private_zones.items():
+            network.VirtualNetworkLink(
+                replace_separators(
+                    f"{self._name}_private_zone_{dns_zone_name}_vnet_link", "_"
+                ),
+                location="Global",
+                private_zone_name=private_dns_zone.name,
+                registration_enabled=False,
+                resource_group_name=props.resource_group_name,
+                virtual_network=network.SubResourceArgs(id=sre_virtual_network.id),
+                virtual_network_link_name=Output.concat(
+                    "link-to-", sre_virtual_network.name
+                ),
+                opts=ResourceOptions.merge(
+                    child_opts, ResourceOptions(parent=sre_virtual_network)
+                ),
+                tags=child_tags,
+            )
+
+        # Register outputs
+        self.route_table_name = route_table.name
+        self.shm_ns_record = shm_ns_record
+        self.sre_fqdn = sre_dns_zone.name
+        self.sre_private_dns_zone = sre_private_dns_zone
+        self.subnet_application_gateway = network.get_subnet_output(
+            subnet_name=subnet_application_gateway_name,
+            resource_group_name=props.resource_group_name,
+            virtual_network_name=sre_virtual_network.name,
+        )
+        self.subnet_apt_proxy_server = network.get_subnet_output(
+            subnet_name=subnet_apt_proxy_server_name,
+            resource_group_name=props.resource_group_name,
+            virtual_network_name=sre_virtual_network.name,
+        )
+        self.subnet_clamav_mirror = network.get_subnet_output(
+            subnet_name=subnet_clamav_mirror_name,
+            resource_group_name=props.resource_group_name,
+            virtual_network_name=sre_virtual_network.name,
+        )
+        self.subnet_data_configuration = network.get_subnet_output(
+            subnet_name=subnet_data_configuration_name,
+            resource_group_name=props.resource_group_name,
+            virtual_network_name=sre_virtual_network.name,
+        )
+        self.subnet_data_desired_state = network.get_subnet_output(
+            subnet_name=subnet_data_desired_state_name,
+            resource_group_name=props.resource_group_name,
+            virtual_network_name=sre_virtual_network.name,
+        )
+        self.subnet_data_private = network.get_subnet_output(
+            subnet_name=subnet_data_private_name,
+            resource_group_name=props.resource_group_name,
+            virtual_network_name=sre_virtual_network.name,
+        )
+        self.subnet_firewall = network.get_subnet_output(
+            subnet_name=subnet_firewall_name,
+            resource_group_name=props.resource_group_name,
+            virtual_network_name=sre_virtual_network.name,
+        )
+        self.subnet_firewall_management = network.get_subnet_output(
+            subnet_name=subnet_firewall_management_name,
+            resource_group_name=props.resource_group_name,
+            virtual_network_name=sre_virtual_network.name,
+        )
+        self.subnet_guacamole_containers = network.get_subnet_output(
+            subnet_name=subnet_guacamole_containers_name,
+            resource_group_name=props.resource_group_name,
+            virtual_network_name=sre_virtual_network.name,
+        )
+        self.subnet_guacamole_containers_support = 
network.get_subnet_output( + subnet_name=subnet_guacamole_containers_support_name, + resource_group_name=props.resource_group_name, + virtual_network_name=sre_virtual_network.name, + ) + self.subnet_identity_containers = network.get_subnet_output( + subnet_name=subnet_identity_containers_name, + resource_group_name=props.resource_group_name, + virtual_network_name=sre_virtual_network.name, + ) + self.subnet_monitoring = network.get_subnet_output( + subnet_name=subnet_monitoring_name, + resource_group_name=props.resource_group_name, + virtual_network_name=sre_virtual_network.name, + ) + self.subnet_user_services_containers = network.get_subnet_output( + subnet_name=subnet_user_services_containers_name, + resource_group_name=props.resource_group_name, + virtual_network_name=sre_virtual_network.name, + ) + self.subnet_user_services_containers_support = network.get_subnet_output( + subnet_name=subnet_user_services_containers_support_name, + resource_group_name=props.resource_group_name, + virtual_network_name=sre_virtual_network.name, + ) + self.subnet_user_services_databases = network.get_subnet_output( + subnet_name=subnet_user_services_databases_name, + resource_group_name=props.resource_group_name, + virtual_network_name=sre_virtual_network.name, + ) + self.subnet_user_services_software_repositories = network.get_subnet_output( + subnet_name=subnet_user_services_software_repositories_name, + resource_group_name=props.resource_group_name, + virtual_network_name=sre_virtual_network.name, + ) + self.subnet_workspaces = network.get_subnet_output( + subnet_name=subnet_workspaces_name, + resource_group_name=props.resource_group_name, + virtual_network_name=sre_virtual_network.name, + ) + self.virtual_network = sre_virtual_network diff --git a/data_safe_haven/infrastructure/programs/sre/remote_desktop.py b/data_safe_haven/infrastructure/programs/sre/remote_desktop.py new file mode 100644 index 0000000000..354f22ea49 --- /dev/null +++ b/data_safe_haven/infrastructure/programs/sre/remote_desktop.py @@ -0,0 +1,423 @@ +"""Pulumi component for SRE remote desktop""" + +from collections.abc import Mapping + +from pulumi import ComponentResource, Input, Output, ResourceOptions +from pulumi_azure_native import containerinstance, network, storage + +from data_safe_haven.external import AzureIPv4Range +from data_safe_haven.infrastructure.common import ( + DockerHubCredentials, + get_id_from_subnet, +) +from data_safe_haven.infrastructure.components import ( + EntraApplication, + EntraApplicationProps, + FileShareFile, + FileShareFileProps, + PostgresqlDatabaseComponent, + PostgresqlDatabaseProps, +) +from data_safe_haven.resources import resources_path +from data_safe_haven.utility import FileReader + + +class SRERemoteDesktopProps: + """Properties for SRERemoteDesktopComponent""" + + def __init__( + self, + allow_copy: Input[bool], + allow_paste: Input[bool], + database_password: Input[str], + dns_server_ip: Input[str], + dockerhub_credentials: DockerHubCredentials, + entra_application_fqdn: Input[str], + entra_application_name: Input[str], + entra_auth_token: str, + entra_tenant_id: Input[str], + ldap_group_filter: Input[str], + ldap_group_search_base: Input[str], + ldap_server_hostname: Input[str], + ldap_server_port: Input[int], + ldap_user_filter: Input[str], + ldap_user_search_base: Input[str], + location: Input[str], + resource_group_name: Input[str], + storage_account_key: Input[str], + storage_account_name: Input[str], + subnet_guacamole_containers: Input[network.GetSubnetResult], + 
subnet_guacamole_containers_support: Input[network.GetSubnetResult], + database_username: Input[str] | None = "postgresadmin", + ) -> None: + self.database_password = database_password + self.database_username = ( + database_username if database_username else "postgresadmin" + ) + self.disable_copy = not allow_copy + self.disable_paste = not allow_paste + self.dns_server_ip = dns_server_ip + self.dockerhub_credentials = dockerhub_credentials + self.entra_application_name = entra_application_name + self.entra_application_url = Output.concat("https://", entra_application_fqdn) + self.entra_auth_token = entra_auth_token + self.entra_tenant_id = entra_tenant_id + self.ldap_group_filter = ldap_group_filter + self.ldap_group_search_base = ldap_group_search_base + self.ldap_server_hostname = ldap_server_hostname + self.ldap_server_port = ldap_server_port + self.ldap_user_filter = ldap_user_filter + self.ldap_user_search_base = ldap_user_search_base + self.location = location + self.resource_group_name = resource_group_name + self.storage_account_key = storage_account_key + self.storage_account_name = storage_account_name + self.subnet_guacamole_containers_id = Output.from_input( + subnet_guacamole_containers + ).apply(get_id_from_subnet) + self.subnet_guacamole_containers_ip_addresses = Output.from_input( + subnet_guacamole_containers + ).apply( + lambda s: ( + [ + str(ip) + for ip in AzureIPv4Range.from_cidr(s.address_prefix).available() + ] + if s.address_prefix + else [] + ) + ) + self.subnet_guacamole_containers_support_id = Output.from_input( + subnet_guacamole_containers_support + ).apply(get_id_from_subnet) + self.subnet_guacamole_containers_support_ip_addresses = Output.from_input( + subnet_guacamole_containers_support + ).apply( + lambda s: ( + [ + str(ip) + for ip in AzureIPv4Range.from_cidr(s.address_prefix).available() + ] + if s.address_prefix + else [] + ) + ) + + +class SRERemoteDesktopComponent(ComponentResource): + """Deploy remote desktop gateway with Pulumi""" + + def __init__( + self, + name: str, + stack_name: str, + props: SRERemoteDesktopProps, + opts: ResourceOptions | None = None, + tags: Input[Mapping[str, Input[str]]] | None = None, + ) -> None: + super().__init__("dsh:sre:RemoteDesktopComponent", name, {}, opts) + child_opts = ResourceOptions.merge(opts, ResourceOptions(parent=self)) + child_tags = {"component": "remote desktop"} | (tags if tags else {}) + + # Define Entra ID application + entra_application = EntraApplication( + f"{self._name}_entra_application", + EntraApplicationProps( + application_name=props.entra_application_name, + web_redirect_url=props.entra_application_url, + ), + auth_token=props.entra_auth_token, + opts=child_opts, + ) + + # Define configuration file shares + file_share = storage.FileShare( + f"{self._name}_file_share", + access_tier=storage.ShareAccessTier.COOL, + account_name=props.storage_account_name, + resource_group_name=props.resource_group_name, + share_name="remote-desktop-caddy", + share_quota=1, + signed_identifiers=[], + opts=child_opts, + ) + + # Upload Caddyfile + reader = FileReader(resources_path / "remote_desktop" / "caddy" / "Caddyfile") + FileShareFile( + f"{self._name}_file_share_caddyfile", + FileShareFileProps( + destination_path=reader.name, + share_name=file_share.name, + file_contents=Output.secret(reader.file_contents()), + storage_account_key=props.storage_account_key, + storage_account_name=props.storage_account_name, + ), + opts=ResourceOptions.merge(child_opts, ResourceOptions(parent=file_share)), + ) + + # 
Define a PostgreSQL server to hold user and connection details + db_guacamole_connections = "guacamole" + db_server_guacamole = PostgresqlDatabaseComponent( + f"{self._name}_db_guacamole", + PostgresqlDatabaseProps( + database_names=[db_guacamole_connections], + database_password=props.database_password, + database_resource_group_name=props.resource_group_name, + database_server_name=f"{stack_name}-db-server-guacamole", + database_subnet_id=props.subnet_guacamole_containers_support_id, + database_username=props.database_username, + disable_secure_transport=False, + location=props.location, + ), + opts=child_opts, + tags=child_tags, + ) + + # Define the container group with guacd, guacamole and caddy + container_group = containerinstance.ContainerGroup( + f"{self._name}_container_group", + container_group_name=f"{stack_name}-container-group-remote-desktop", + containers=[ + containerinstance.ContainerArgs( + image="caddy:2.8.4", + name="caddy"[:63], + ports=[ + containerinstance.ContainerPortArgs( + port=80, + protocol=containerinstance.ContainerNetworkProtocol.TCP, + ) + ], + resources=containerinstance.ResourceRequirementsArgs( + requests=containerinstance.ResourceRequestsArgs( + cpu=0.5, + memory_in_gb=0.5, + ), + ), + volume_mounts=[ + containerinstance.VolumeMountArgs( + mount_path="/etc/caddy", + name="caddy-etc-caddy", + read_only=False, + ), + ], + ), + # Note that the environment variables are not all documented. + # More information at https://github.com/apache/guacamole-client/blob/master/guacamole-docker/bin/start.sh + containerinstance.ContainerArgs( + image="guacamole/guacamole:1.5.5", + name="guacamole"[:63], + environment_variables=[ + containerinstance.EnvironmentVariableArgs( + name="GUACD_HOSTNAME", value="localhost" + ), + containerinstance.EnvironmentVariableArgs( + name="LOGBACK_LEVEL", value="debug" + ), + containerinstance.EnvironmentVariableArgs( + name="OPENID_AUTHORIZATION_ENDPOINT", + value=Output.concat( + "https://login.microsoftonline.com/", + props.entra_tenant_id, + "/oauth2/v2.0/authorize", + ), + ), + containerinstance.EnvironmentVariableArgs( + name="OPENID_CLIENT_ID", + value=entra_application.application_id, + ), + containerinstance.EnvironmentVariableArgs( + name="OPENID_ISSUER", + value=Output.concat( + "https://login.microsoftonline.com/", + props.entra_tenant_id, + "/v2.0", + ), + ), + containerinstance.EnvironmentVariableArgs( + name="OPENID_JWKS_ENDPOINT", + value=Output.concat( + "https://login.microsoftonline.com/", + props.entra_tenant_id, + "/discovery/v2.0/keys", + ), + ), + containerinstance.EnvironmentVariableArgs( + name="OPENID_REDIRECT_URI", + value=props.entra_application_url, + ), + containerinstance.EnvironmentVariableArgs( + name="OPENID_USERNAME_CLAIM_TYPE", + value="preferred_username", # this is 'username@domain' + ), + containerinstance.EnvironmentVariableArgs( + name="POSTGRESQL_DATABASE", value=db_guacamole_connections + ), + containerinstance.EnvironmentVariableArgs( + name="POSTGRESQL_HOSTNAME", + value=props.subnet_guacamole_containers_support_ip_addresses[ + 0 + ], + ), + containerinstance.EnvironmentVariableArgs( + name="POSTGRESQL_PASSWORD", + secure_value=props.database_password, + ), + containerinstance.EnvironmentVariableArgs( + name="POSTGRESQL_SSL_MODE", value="require" + ), + containerinstance.EnvironmentVariableArgs( + name="POSTGRESQL_SOCKET_TIMEOUT", value="5" + ), + containerinstance.EnvironmentVariableArgs( + name="POSTGRESQL_USER", + value=props.database_username, + ), + ], + 
resources=containerinstance.ResourceRequirementsArgs( + requests=containerinstance.ResourceRequestsArgs( + cpu=1, + memory_in_gb=1, + ), + ), + ), + containerinstance.ContainerArgs( + image="guacamole/guacd:1.5.5", + name="guacd"[:63], + environment_variables=[ + containerinstance.EnvironmentVariableArgs( + name="GUACD_LOG_LEVEL", value="debug" + ), + ], + resources=containerinstance.ResourceRequirementsArgs( + requests=containerinstance.ResourceRequestsArgs( + cpu=1, + memory_in_gb=1, + ), + ), + ), + containerinstance.ContainerArgs( + image="ghcr.io/alan-turing-institute/guacamole-user-sync:v0.5.0", + name="guacamole-user-sync"[:63], + environment_variables=[ + containerinstance.EnvironmentVariableArgs( + name="LDAP_GROUP_BASE_DN", + value=props.ldap_group_search_base, + ), + containerinstance.EnvironmentVariableArgs( + name="LDAP_GROUP_NAME_ATTR", + value="cn", + ), + containerinstance.EnvironmentVariableArgs( + name="LDAP_GROUP_FILTER", + value=props.ldap_group_filter, + ), + containerinstance.EnvironmentVariableArgs( + name="LDAP_HOST", + value=props.ldap_server_hostname, + ), + containerinstance.EnvironmentVariableArgs( + name="LDAP_PORT", + value=Output.from_input(props.ldap_server_port).apply(str), + ), + containerinstance.EnvironmentVariableArgs( + name="LDAP_USER_NAME_ATTR", + value="oauth_username", # this is the name that users connect with + ), + containerinstance.EnvironmentVariableArgs( + name="LDAP_USER_BASE_DN", + value=props.ldap_user_search_base, + ), + containerinstance.EnvironmentVariableArgs( + name="LDAP_USER_FILTER", + value=props.ldap_user_filter, + ), + containerinstance.EnvironmentVariableArgs( + name="POSTGRESQL_DB_NAME", + value=db_guacamole_connections, + ), + containerinstance.EnvironmentVariableArgs( + name="POSTGRESQL_HOST", + value=props.subnet_guacamole_containers_support_ip_addresses[ + 0 + ], + ), + containerinstance.EnvironmentVariableArgs( + name="POSTGRESQL_PASSWORD", + secure_value=props.database_password, + ), + containerinstance.EnvironmentVariableArgs( + name="POSTGRESQL_USERNAME", + value=props.database_username, + ), + containerinstance.EnvironmentVariableArgs( + name="REPEAT_INTERVAL", + value="180", + ), + ], + resources=containerinstance.ResourceRequirementsArgs( + requests=containerinstance.ResourceRequestsArgs( + cpu=0.5, + memory_in_gb=0.5, + ), + ), + ), + ], + dns_config=containerinstance.DnsConfigurationArgs( + name_servers=[props.dns_server_ip], + ), + # Required due to DockerHub rate-limit: https://docs.docker.com/docker-hub/download-rate-limit/ + image_registry_credentials=[ + { + "password": Output.secret(props.dockerhub_credentials.access_token), + "server": props.dockerhub_credentials.server, + "username": props.dockerhub_credentials.username, + } + ], + ip_address=containerinstance.IpAddressArgs( + ports=[ + containerinstance.PortArgs( + port=80, + protocol=containerinstance.ContainerGroupNetworkProtocol.TCP, + ) + ], + type=containerinstance.ContainerGroupIpAddressType.PRIVATE, + ), + location=props.location, + os_type=containerinstance.OperatingSystemTypes.LINUX, + resource_group_name=props.resource_group_name, + restart_policy=containerinstance.ContainerGroupRestartPolicy.ALWAYS, + sku=containerinstance.ContainerGroupSku.STANDARD, + subnet_ids=[ + containerinstance.ContainerGroupSubnetIdArgs( + id=props.subnet_guacamole_containers_id + ) + ], + volumes=[ + containerinstance.VolumeArgs( + azure_file=containerinstance.AzureFileVolumeArgs( + share_name=file_share.name, + storage_account_key=props.storage_account_key, + 
storage_account_name=props.storage_account_name,
+                    ),
+                    name="caddy-etc-caddy",
+                ),
+            ],
+            opts=ResourceOptions.merge(
+                child_opts,
+                ResourceOptions(
+                    delete_before_replace=True, replace_on_changes=["containers"]
+                ),
+            ),
+            tags=child_tags,
+        )
+
+        # Register exports
+        self.exports = {
+            "connection_db_name": db_guacamole_connections,
+            "connection_db_server_name": db_server_guacamole.db_server.name,
+            "container_group_name": container_group.name,
+            "disable_copy": props.disable_copy,
+            "disable_paste": props.disable_paste,
+            "resource_group_name": props.resource_group_name,
+        }
diff --git a/data_safe_haven/infrastructure/programs/sre/software_repositories.py b/data_safe_haven/infrastructure/programs/sre/software_repositories.py
new file mode 100644
index 0000000000..2d7ebbd47d
--- /dev/null
+++ b/data_safe_haven/infrastructure/programs/sre/software_repositories.py
@@ -0,0 +1,337 @@
+"""Pulumi component for SRE software repositories"""
+
+from collections.abc import Mapping
+
+from pulumi import ComponentResource, Input, Output, ResourceOptions
+from pulumi_azure_native import containerinstance, storage
+
+from data_safe_haven.infrastructure.common import (
+    DockerHubCredentials,
+    get_ip_address_from_container_group,
+)
+from data_safe_haven.infrastructure.components import (
+    FileShareFile,
+    FileShareFileProps,
+    LocalDnsRecordComponent,
+    LocalDnsRecordProps,
+)
+from data_safe_haven.resources import resources_path
+from data_safe_haven.types import Ports, SoftwarePackageCategory
+from data_safe_haven.utility import FileReader
+
+
+class SRESoftwareRepositoriesProps:
+    """Properties for SRESoftwareRepositoriesComponent"""
+
+    def __init__(
+        self,
+        dns_server_ip: Input[str],
+        dockerhub_credentials: DockerHubCredentials,
+        location: Input[str],
+        nexus_admin_password: Input[str],
+        resource_group_name: Input[str],
+        software_packages: SoftwarePackageCategory,
+        sre_fqdn: Input[str],
+        storage_account_key: Input[str],
+        storage_account_name: Input[str],
+        subnet_id: Input[str],
+    ) -> None:
+        self.dns_server_ip = dns_server_ip
+        self.dockerhub_credentials = dockerhub_credentials
+        self.location = location
+        self.nexus_admin_password = Output.secret(nexus_admin_password)
+        self.nexus_packages: str | None = {
+            SoftwarePackageCategory.ANY: "all",
+            SoftwarePackageCategory.PRE_APPROVED: "selected",
+            SoftwarePackageCategory.NONE: None,
+        }[software_packages]
+        self.resource_group_name = resource_group_name
+        self.sre_fqdn = sre_fqdn
+        self.storage_account_key = storage_account_key
+        self.storage_account_name = storage_account_name
+        self.subnet_id = subnet_id
+
+
+class SRESoftwareRepositoriesComponent(ComponentResource):
+    """Deploy SRE software repositories with Pulumi"""
+
+    def __init__(
+        self,
+        name: str,
+        stack_name: str,
+        props: SRESoftwareRepositoriesProps,
+        opts: ResourceOptions | None = None,
+        tags: Input[Mapping[str, Input[str]]] | None = None,
+    ) -> None:
+        super().__init__("dsh:sre:SoftwareRepositoriesComponent", name, {}, opts)
+        child_opts = ResourceOptions.merge(opts, ResourceOptions(parent=self))
+        child_tags = {"component": "software repositories"} | (tags if tags else {})
+
+        # Use a dummy hostname if no repositories are deployed
+        hostname: Output[str] | str = "example.com"
+
+        # Define configuration file shares
+        file_share_caddy = storage.FileShare(
+            f"{self._name}_file_share_caddy",
+            access_tier=storage.ShareAccessTier.COOL,
+            account_name=props.storage_account_name,
+            resource_group_name=props.resource_group_name,
+            share_name="software-repositories-caddy",
+
share_quota=1, + signed_identifiers=[], + opts=child_opts, + ) + file_share_nexus = storage.FileShare( + f"{self._name}_file_share_nexus", + access_tier=storage.ShareAccessTier.COOL, + account_name=props.storage_account_name, + resource_group_name=props.resource_group_name, + share_name="software-repositories-nexus", + share_quota=5, + signed_identifiers=[], + opts=child_opts, + ) + file_share_nexus_allowlists = storage.FileShare( + f"{self._name}_file_share_nexus_allowlists", + access_tier=storage.ShareAccessTier.COOL, + account_name=props.storage_account_name, + resource_group_name=props.resource_group_name, + share_name="software-repositories-nexus-allowlists", + share_quota=1, + signed_identifiers=[], + opts=child_opts, + ) + + # Upload Caddyfile + caddyfile_reader = FileReader( + resources_path / "software_repositories" / "caddy" / "Caddyfile" + ) + FileShareFile( + f"{self._name}_file_share_caddyfile", + FileShareFileProps( + destination_path=caddyfile_reader.name, + share_name=file_share_caddy.name, + file_contents=Output.secret(caddyfile_reader.file_contents()), + storage_account_key=props.storage_account_key, + storage_account_name=props.storage_account_name, + ), + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=file_share_caddy) + ), + ) + + # Upload Nexus allowlists + cran_reader = FileReader( + resources_path / "software_repositories" / "allowlists" / "cran.allowlist" + ) + FileShareFile( + f"{self._name}_file_share_cran_allowlist", + FileShareFileProps( + destination_path=cran_reader.name, + share_name=file_share_nexus_allowlists.name, + file_contents=cran_reader.file_contents(), + storage_account_key=props.storage_account_key, + storage_account_name=props.storage_account_name, + ), + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=file_share_nexus) + ), + ) + pypi_reader = FileReader( + resources_path / "software_repositories" / "allowlists" / "pypi.allowlist" + ) + FileShareFile( + f"{self._name}_file_share_pypi_allowlist", + FileShareFileProps( + destination_path=pypi_reader.name, + share_name=file_share_nexus_allowlists.name, + file_contents=pypi_reader.file_contents(), + storage_account_key=props.storage_account_key, + storage_account_name=props.storage_account_name, + ), + opts=ResourceOptions.merge( + child_opts, ResourceOptions(parent=file_share_nexus) + ), + ) + + # Define the container group with nexus and caddy + if props.nexus_packages: + container_group = containerinstance.ContainerGroup( + f"{self._name}_container_group", + container_group_name=f"{stack_name}-container-group-software-repositories", + containers=[ + containerinstance.ContainerArgs( + image="caddy:2.8.4", + name="caddy"[:63], + ports=[ + containerinstance.ContainerPortArgs( + port=80, + protocol=containerinstance.ContainerGroupNetworkProtocol.TCP, + ) + ], + resources=containerinstance.ResourceRequirementsArgs( + requests=containerinstance.ResourceRequestsArgs( + cpu=0.5, + memory_in_gb=0.5, + ), + ), + volume_mounts=[ + containerinstance.VolumeMountArgs( + mount_path="/etc/caddy", + name="caddy-etc-caddy", + read_only=True, + ), + ], + ), + containerinstance.ContainerArgs( + image="sonatype/nexus3:3.70.1", + name="nexus"[:63], + environment_variables=[], + ports=[], + resources=containerinstance.ResourceRequirementsArgs( + requests=containerinstance.ResourceRequestsArgs( + cpu=3, + memory_in_gb=4, + ), + ), + volume_mounts=[ + containerinstance.VolumeMountArgs( + mount_path="/nexus-data", + name="nexus-nexus-data", + read_only=False, + ), + ], + ), + 
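The `nexus_packages` lookup defined in the props class above turns the configured `SoftwarePackageCategory` into the mode string consumed by the nexus-allowlist container, and the container group is only deployed when that lookup is not None. A minimal, self-contained sketch of that dispatch, using a simplified stand-in for the real enum in `data_safe_haven.types`:

from enum import Enum


class SoftwarePackageCategory(Enum):
    # Simplified stand-in for data_safe_haven.types.SoftwarePackageCategory
    ANY = "any"
    PRE_APPROVED = "pre_approved"
    NONE = "none"


def nexus_packages_mode(category: SoftwarePackageCategory) -> str | None:
    """Map a package category onto the NEXUS_PACKAGES mode string."""
    return {
        SoftwarePackageCategory.ANY: "all",
        SoftwarePackageCategory.PRE_APPROVED: "selected",
        SoftwarePackageCategory.NONE: None,
    }[category]


# When the category is NONE there is nothing to proxy: the component above then
# skips the container group and keeps the dummy hostname "example.com".
assert nexus_packages_mode(SoftwarePackageCategory.ANY) == "all"
assert nexus_packages_mode(SoftwarePackageCategory.NONE) is None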
containerinstance.ContainerArgs( + image="ghcr.io/alan-turing-institute/nexus-allowlist:v0.9.0", + name="nexus-allowlist"[:63], + environment_variables=[ + containerinstance.EnvironmentVariableArgs( + name="NEXUS_ADMIN_PASSWORD", + secure_value=props.nexus_admin_password, + ), + containerinstance.EnvironmentVariableArgs( + name="NEXUS_PACKAGES", + value=props.nexus_packages, + ), + containerinstance.EnvironmentVariableArgs( + name="NEXUS_HOST", + value="localhost", + ), + containerinstance.EnvironmentVariableArgs( + name="NEXUS_PORT", + value=Ports.NEXUS, + ), + # Use fallback updating method due to issue with changes to + # files on Azure storage mount not being recognised by entr + containerinstance.EnvironmentVariableArgs( + name="ENTR_FALLBACK", + value="1", + ), + ], + ports=[], + resources=containerinstance.ResourceRequirementsArgs( + requests=containerinstance.ResourceRequestsArgs( + cpu=0.5, + memory_in_gb=0.5, + ), + ), + volume_mounts=[ + containerinstance.VolumeMountArgs( + mount_path="/allowlists", + name="nexus-allowlists-allowlists", + read_only=True, + ), + containerinstance.VolumeMountArgs( + mount_path="/nexus-data", + name="nexus-nexus-data", + read_only=True, + ), + ], + ), + ], + dns_config=containerinstance.DnsConfigurationArgs( + name_servers=[props.dns_server_ip], + ), + # Required due to DockerHub rate-limit: https://docs.docker.com/docker-hub/download-rate-limit/ + image_registry_credentials=[ + { + "password": Output.secret( + props.dockerhub_credentials.access_token + ), + "server": props.dockerhub_credentials.server, + "username": props.dockerhub_credentials.username, + } + ], + ip_address=containerinstance.IpAddressArgs( + ports=[ + containerinstance.PortArgs( + port=80, + protocol=containerinstance.ContainerGroupNetworkProtocol.TCP, + ) + ], + type=containerinstance.ContainerGroupIpAddressType.PRIVATE, + ), + location=props.location, + os_type=containerinstance.OperatingSystemTypes.LINUX, + resource_group_name=props.resource_group_name, + restart_policy=containerinstance.ContainerGroupRestartPolicy.ALWAYS, + sku=containerinstance.ContainerGroupSku.STANDARD, + subnet_ids=[ + containerinstance.ContainerGroupSubnetIdArgs(id=props.subnet_id) + ], + volumes=[ + containerinstance.VolumeArgs( + azure_file=containerinstance.AzureFileVolumeArgs( + share_name=file_share_caddy.name, + storage_account_key=props.storage_account_key, + storage_account_name=props.storage_account_name, + ), + name="caddy-etc-caddy", + ), + containerinstance.VolumeArgs( + azure_file=containerinstance.AzureFileVolumeArgs( + share_name=file_share_nexus.name, + storage_account_key=props.storage_account_key, + storage_account_name=props.storage_account_name, + ), + name="nexus-nexus-data", + ), + containerinstance.VolumeArgs( + azure_file=containerinstance.AzureFileVolumeArgs( + share_name=file_share_nexus_allowlists.name, + storage_account_key=props.storage_account_key, + storage_account_name=props.storage_account_name, + ), + name="nexus-allowlists-allowlists", + ), + ], + opts=ResourceOptions.merge( + child_opts, + ResourceOptions( + delete_before_replace=True, replace_on_changes=["containers"] + ), + ), + tags=child_tags, + ) + + # Register the container group in the SRE DNS zone + local_dns = LocalDnsRecordComponent( + f"{self._name}_nexus_dns_record_set", + LocalDnsRecordProps( + base_fqdn=props.sre_fqdn, + private_ip_address=get_ip_address_from_container_group( + container_group + ), + record_name="nexus", + resource_group_name=props.resource_group_name, + ), + opts=ResourceOptions.merge( + 
child_opts, ResourceOptions(parent=container_group) + ), + ) + + hostname = local_dns.hostname + + # Register outputs + self.hostname = hostname diff --git a/data_safe_haven/infrastructure/programs/sre/user_services.py b/data_safe_haven/infrastructure/programs/sre/user_services.py new file mode 100644 index 0000000000..a5abed4fe8 --- /dev/null +++ b/data_safe_haven/infrastructure/programs/sre/user_services.py @@ -0,0 +1,181 @@ +from collections.abc import Mapping + +from pulumi import ComponentResource, Input, Output, ResourceOptions +from pulumi_azure_native import network + +from data_safe_haven.infrastructure.common import ( + DockerHubCredentials, + get_id_from_subnet, +) +from data_safe_haven.types import DatabaseSystem, SoftwarePackageCategory + +from .database_servers import SREDatabaseServerComponent, SREDatabaseServerProps +from .gitea_server import SREGiteaServerComponent, SREGiteaServerProps +from .hedgedoc_server import SREHedgeDocServerComponent, SREHedgeDocServerProps +from .software_repositories import ( + SRESoftwareRepositoriesComponent, + SRESoftwareRepositoriesProps, +) + + +class SREUserServicesProps: + """Properties for SREUserServicesComponent""" + + def __init__( + self, + database_service_admin_password: Input[str], + databases: list[DatabaseSystem], # this must *not* be passed as an Input[T] + dns_server_ip: Input[str], + dockerhub_credentials: DockerHubCredentials, + gitea_database_password: Input[str], + hedgedoc_database_password: Input[str], + ldap_server_hostname: Input[str], + ldap_server_port: Input[int], + ldap_username_attribute: Input[str], + ldap_user_filter: Input[str], + ldap_user_search_base: Input[str], + location: Input[str], + nexus_admin_password: Input[str], + resource_group_name: Input[str], + software_packages: SoftwarePackageCategory, + sre_fqdn: Input[str], + storage_account_key: Input[str], + storage_account_name: Input[str], + subnet_containers: Input[network.GetSubnetResult], + subnet_containers_support: Input[network.GetSubnetResult], + subnet_databases: Input[network.GetSubnetResult], + subnet_software_repositories: Input[network.GetSubnetResult], + ) -> None: + self.database_service_admin_password = database_service_admin_password + self.databases = databases + self.dns_server_ip = dns_server_ip + self.dockerhub_credentials = dockerhub_credentials + self.gitea_database_password = gitea_database_password + self.hedgedoc_database_password = hedgedoc_database_password + self.ldap_server_hostname = ldap_server_hostname + self.ldap_server_port = ldap_server_port + self.ldap_username_attribute = ldap_username_attribute + self.ldap_user_filter = ldap_user_filter + self.ldap_user_search_base = ldap_user_search_base + self.location = location + self.nexus_admin_password = Output.secret(nexus_admin_password) + self.resource_group_name = resource_group_name + self.software_packages = software_packages + self.sre_fqdn = sre_fqdn + self.storage_account_key = storage_account_key + self.storage_account_name = storage_account_name + self.subnet_containers_id = Output.from_input(subnet_containers).apply( + get_id_from_subnet + ) + self.subnet_containers_support_id = Output.from_input( + subnet_containers_support + ).apply(get_id_from_subnet) + self.subnet_databases_id = Output.from_input(subnet_databases).apply( + get_id_from_subnet + ) + self.subnet_software_repositories_id = Output.from_input( + subnet_software_repositories + ).apply(get_id_from_subnet) + + +class SREUserServicesComponent(ComponentResource): + """Deploy user services with Pulumi""" + 
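The props class above converts each `GetSubnetResult` input into a subnet ID with `Output.from_input(...).apply(...)`, so the lookup only runs once Pulumi has resolved the underlying value. A short sketch of that pattern; `get_id_from_subnet` here is a stand-in for the helper imported from `data_safe_haven.infrastructure.common`, assumed to read an `id` attribute:

from typing import Any

from pulumi import Input, Output


def get_id_from_subnet(subnet: Any) -> str:
    # Assumption: a GetSubnetResult-like object exposing an `id` attribute
    return str(subnet.id)


def subnet_id_output(subnet: Input[Any]) -> Output[str]:
    # Wrap whatever was passed (plain value, awaitable or Output) and derive
    # the resource ID lazily, once the value is known
    return Output.from_input(subnet).apply(get_id_from_subnet)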
+ def __init__( + self, + name: str, + stack_name: str, + props: SREUserServicesProps, + opts: ResourceOptions | None = None, + tags: Input[Mapping[str, Input[str]]] | None = None, + ) -> None: + super().__init__("dsh:sre:UserServicesComponent", name, {}, opts) + child_opts = ResourceOptions.merge(opts, ResourceOptions(parent=self)) + child_tags = {"component": "user services"} | (tags if tags else {}) + + # Deploy the Gitea server + SREGiteaServerComponent( + "sre_gitea_server", + stack_name, + SREGiteaServerProps( + containers_subnet_id=props.subnet_containers_id, + database_subnet_id=props.subnet_containers_support_id, + database_password=props.gitea_database_password, + dns_server_ip=props.dns_server_ip, + dockerhub_credentials=props.dockerhub_credentials, + ldap_server_hostname=props.ldap_server_hostname, + ldap_server_port=props.ldap_server_port, + ldap_username_attribute=props.ldap_username_attribute, + ldap_user_filter=props.ldap_user_filter, + ldap_user_search_base=props.ldap_user_search_base, + location=props.location, + resource_group_name=props.resource_group_name, + sre_fqdn=props.sre_fqdn, + storage_account_key=props.storage_account_key, + storage_account_name=props.storage_account_name, + ), + opts=child_opts, + tags=child_tags, + ) + + # Deploy the HedgeDoc server + SREHedgeDocServerComponent( + "sre_hedgedoc_server", + stack_name, + SREHedgeDocServerProps( + containers_subnet_id=props.subnet_containers_id, + database_password=props.hedgedoc_database_password, + database_subnet_id=props.subnet_containers_support_id, + dns_server_ip=props.dns_server_ip, + dockerhub_credentials=props.dockerhub_credentials, + ldap_server_hostname=props.ldap_server_hostname, + ldap_server_port=props.ldap_server_port, + ldap_username_attribute=props.ldap_username_attribute, + ldap_user_filter=props.ldap_user_filter, + ldap_user_search_base=props.ldap_user_search_base, + location=props.location, + resource_group_name=props.resource_group_name, + sre_fqdn=props.sre_fqdn, + storage_account_key=props.storage_account_key, + storage_account_name=props.storage_account_name, + ), + opts=child_opts, + tags=child_tags, + ) + + # Deploy software repository servers + self.software_repositories = SRESoftwareRepositoriesComponent( + "sre_software_repositories", + stack_name, + SRESoftwareRepositoriesProps( + dns_server_ip=props.dns_server_ip, + dockerhub_credentials=props.dockerhub_credentials, + location=props.location, + nexus_admin_password=props.nexus_admin_password, + resource_group_name=props.resource_group_name, + sre_fqdn=props.sre_fqdn, + software_packages=props.software_packages, + storage_account_key=props.storage_account_key, + storage_account_name=props.storage_account_name, + subnet_id=props.subnet_software_repositories_id, + ), + opts=child_opts, + tags=child_tags, + ) + + # Deploy whichever database systems are selected + for database in props.databases: + SREDatabaseServerComponent( + f"sre_{database.value}_database_server", + stack_name, + SREDatabaseServerProps( + database_password=props.database_service_admin_password, + database_system=database, + location=props.location, + resource_group_name=props.resource_group_name, + sre_fqdn=props.sre_fqdn, + subnet_id=props.subnet_databases_id, + ), + opts=child_opts, + tags=child_tags, + ) diff --git a/data_safe_haven/infrastructure/programs/sre/workspaces.py b/data_safe_haven/infrastructure/programs/sre/workspaces.py new file mode 100644 index 0000000000..954bd06bb1 --- /dev/null +++ b/data_safe_haven/infrastructure/programs/sre/workspaces.py @@ 
-0,0 +1,185 @@ +from collections.abc import Mapping +from typing import Any + +import chevron +from pulumi import ComponentResource, Input, Output, ResourceOptions +from pulumi_azure_native import network + +from data_safe_haven.exceptions import DataSafeHavenPulumiError +from data_safe_haven.functions import b64encode, replace_separators +from data_safe_haven.infrastructure.common import ( + get_available_ips_from_subnet, + get_name_from_subnet, + get_name_from_vnet, +) +from data_safe_haven.infrastructure.components import LinuxVMComponentProps, VMComponent +from data_safe_haven.logging import get_logger +from data_safe_haven.resources import resources_path + + +class SREWorkspacesProps: + """Properties for SREWorkspacesComponent""" + + def __init__( + self, + admin_password: Input[str], + apt_proxy_server_hostname: Input[str], + clamav_mirror_hostname: Input[str], + data_collection_endpoint_id: Input[str], + data_collection_rule_id: Input[str], + database_service_admin_password: Input[str], + ldap_group_filter: Input[str], + ldap_group_search_base: Input[str], + ldap_server_hostname: Input[str], + ldap_server_port: Input[int], + ldap_user_filter: Input[str], + ldap_user_search_base: Input[str], + location: Input[str], + maintenance_configuration_id: Input[str], + resource_group_name: Input[str], + software_repository_hostname: Input[str], + sre_name: Input[str], + storage_account_data_desired_state_name: Input[str], + storage_account_data_private_sensitive_name: Input[str], + storage_account_data_private_user_name: Input[str], + subnet_workspaces: Input[network.GetSubnetResult], + subscription_name: Input[str], + virtual_network: Input[network.VirtualNetwork], + vm_details: list[tuple[int, str]], # this must *not* be passed as an Input[T] + ) -> None: + self.admin_password = Output.secret(admin_password) + self.admin_username = "dshadmin" + self.apt_proxy_server_hostname = apt_proxy_server_hostname + self.clamav_mirror_hostname = clamav_mirror_hostname + self.data_collection_rule_id = data_collection_rule_id + self.data_collection_endpoint_id = data_collection_endpoint_id + self.database_service_admin_password = database_service_admin_password + self.ldap_group_filter = ldap_group_filter + self.ldap_group_search_base = ldap_group_search_base + self.ldap_server_hostname = ldap_server_hostname + self.ldap_server_port = Output.from_input(ldap_server_port).apply(str) + self.ldap_user_filter = ldap_user_filter + self.ldap_user_search_base = ldap_user_search_base + self.location = location + self.maintenance_configuration_id = maintenance_configuration_id + self.resource_group_name = resource_group_name + self.software_repository_hostname = software_repository_hostname + self.sre_name = sre_name + self.storage_account_data_desired_state_name = ( + storage_account_data_desired_state_name + ) + self.storage_account_data_private_user_name = ( + storage_account_data_private_user_name + ) + self.storage_account_data_private_sensitive_name = ( + storage_account_data_private_sensitive_name + ) + self.subscription_name = subscription_name + self.virtual_network_name = Output.from_input(virtual_network).apply( + get_name_from_vnet + ) + self.subnet_workspaces_name = Output.from_input(subnet_workspaces).apply( + get_name_from_subnet + ) + self.vm_ip_addresses = Output.all(subnet_workspaces, vm_details).apply( + lambda args: self.get_ip_addresses(subnet=args[0], vm_details=args[1]) + ) + self.vm_details = vm_details + + def get_ip_addresses(self, subnet: Any, vm_details: Any) -> list[str]: + if not 
isinstance(subnet, network.GetSubnetResult): + DataSafeHavenPulumiError(f"'subnet' has invalid type {type(subnet)}") + if not isinstance(vm_details, list): + DataSafeHavenPulumiError( + f"'vm_details' has invalid type {type(vm_details)}" + ) + return get_available_ips_from_subnet(subnet)[: len(vm_details)] + + +class SREWorkspacesComponent(ComponentResource): + """Deploy workspaces with Pulumi""" + + def __init__( + self, + name: str, + stack_name: str, + props: SREWorkspacesProps, + opts: ResourceOptions | None = None, + tags: Input[Mapping[str, Input[str]]] | None = None, + ) -> None: + super().__init__("dsh:sre:WorkspacesComponent", name, {}, opts) + child_opts = ResourceOptions.merge(opts, ResourceOptions(parent=self)) + child_tags = {"component": "workspaces"} | (tags if tags else {}) + + # Load cloud-init file + cloudinit = Output.all( + apt_proxy_server_hostname=props.apt_proxy_server_hostname, + clamav_mirror_hostname=props.clamav_mirror_hostname, + database_service_admin_password=props.database_service_admin_password, + ldap_group_filter=props.ldap_group_filter, + ldap_group_search_base=props.ldap_group_search_base, + ldap_server_hostname=props.ldap_server_hostname, + ldap_server_port=props.ldap_server_port, + ldap_user_filter=props.ldap_user_filter, + ldap_user_search_base=props.ldap_user_search_base, + software_repository_hostname=props.software_repository_hostname, + storage_account_data_desired_state_name=props.storage_account_data_desired_state_name, + storage_account_data_private_user_name=props.storage_account_data_private_user_name, + storage_account_data_private_sensitive_name=props.storage_account_data_private_sensitive_name, + ).apply(lambda kwargs: self.template_cloudinit(**kwargs)) + + # Deploy a variable number of VMs depending on the input parameters + vms = [ + VMComponent( + replace_separators(f"{self._name}_vm_workspace_{vm_idx+1:02d}", "_"), + LinuxVMComponentProps( + admin_password=props.admin_password, + admin_username=props.admin_username, + b64cloudinit=cloudinit.apply(b64encode), + data_collection_rule_id=props.data_collection_rule_id, + data_collection_endpoint_id=props.data_collection_endpoint_id, + ip_address_private=props.vm_ip_addresses[vm_idx], + location=props.location, + maintenance_configuration_id=props.maintenance_configuration_id, + resource_group_name=props.resource_group_name, + subnet_name=props.subnet_workspaces_name, + virtual_network_name=props.virtual_network_name, + virtual_network_resource_group_name=props.resource_group_name, + vm_name=Output.concat( + stack_name, "-vm-workspace-", f"{vm_idx+1:02d}" + ).apply(lambda s: replace_separators(s, "-")), + vm_size=vm_size, + ), + opts=child_opts, + tags=child_tags, + ) + for vm_idx, vm_size in props.vm_details + ] + + # Get details for each deployed VM + vm_outputs: list[dict[str, Any]] = [ + { + "ip_address": vm.ip_address_private, + "name": vm.vm_name, + "sku": vm.vm_size, + } + for vm in vms + ] + + # Register exports + self.exports = { + "vm_outputs": vm_outputs, + } + + @staticmethod + def template_cloudinit(**kwargs: str) -> str: + logger = get_logger() + with open( + resources_path / "workspace" / "workspace.cloud_init.mustache.yaml", + encoding="utf-8", + ) as f_cloudinit: + cloudinit = chevron.render(f_cloudinit, kwargs) + logger.debug( + f"Generated cloud-init config: {cloudinit.replace('\n', r'\n')}" + ) + return cloudinit diff --git a/data_safe_haven/infrastructure/project_manager.py b/data_safe_haven/infrastructure/project_manager.py new file mode 100644 index 
0000000000..f5fede53b4 --- /dev/null +++ b/data_safe_haven/infrastructure/project_manager.py @@ -0,0 +1,442 @@ +"""Manage Pulumi projects""" + +import logging +import time +from contextlib import suppress +from importlib import metadata +from typing import Any + +from pulumi import automation + +from data_safe_haven.config import ( + Context, + DSHPulumiConfig, + DSHPulumiProject, + SREConfig, +) +from data_safe_haven.exceptions import ( + DataSafeHavenAzureError, + DataSafeHavenConfigError, + DataSafeHavenError, + DataSafeHavenPulumiError, +) +from data_safe_haven.external import AzureSdk, PulumiAccount +from data_safe_haven.functions import get_key_vault_name, replace_separators +from data_safe_haven.logging import from_ansi, get_console_handler, get_logger + +from .programs import DeclarativeSRE + + +class ProjectManager: + """ + Interact with DSH infrastructure using Pulumi + + Constructing a ProjectManager creates a Pulumi project, with a single stack. The + Pulumi project's program corresponds to either an SHM or SRE. Methods provider a + high level, DSH focused interface to call Pulumi operations on the project, + including `pulumi up` and `pulumi destroy`. + """ + + def __init__( + self, + context: Context, + pulumi_config: DSHPulumiConfig, + pulumi_project_name: str, + program: DeclarativeSRE, + *, + create_project: bool, + ) -> None: + self._options: dict[str, tuple[str, bool, bool]] = {} + self._pulumi_project: DSHPulumiProject | None = None + self._stack: automation.Stack | None = None + self._stack_outputs: automation.OutputMap | None = None + self.account = PulumiAccount( + resource_group_name=context.resource_group_name, + storage_account_name=context.storage_account_name, + subscription_name=context.subscription_name, + ) + self.context = context + self.create_project = create_project + self.logger = get_logger() + self.program = program + self.project_name = replace_separators(context.tags["project"].lower(), "-") + self.pulumi_config = pulumi_config + self.pulumi_project_name = pulumi_project_name + self.stack_name = self.program.stack_name + + @property + def pulumi_extra_args(self) -> dict[str, Any]: + extra_args: dict[str, Any] = {} + # Produce verbose Pulumi output if running in verbose mode + if get_console_handler().level <= logging.DEBUG: + extra_args["debug"] = True + extra_args["log_to_std_err"] = True + extra_args["log_verbosity"] = 9 + else: + extra_args["debug"] = None + extra_args["log_to_std_err"] = None + extra_args["log_verbosity"] = None + + extra_args["color"] = "always" + extra_args["log_flow"] = True + extra_args["on_output"] = self.log_message + return extra_args + + @property + def project_settings(self) -> automation.ProjectSettings: + return automation.ProjectSettings( + name="data-safe-haven", + runtime="python", + backend=automation.ProjectBackend(url=self.context.pulumi_backend_url), + ) + + @property + def stack_settings(self) -> automation.StackSettings: + return automation.StackSettings( + config=self.pulumi_project.stack_config, + encrypted_key=self.pulumi_config.encrypted_key, + secrets_provider=self.context.pulumi_secrets_provider_url, + ) + + @property + def pulumi_project(self) -> DSHPulumiProject: + if not self._pulumi_project: + # Create DSH Pulumi Project if it does not exist, otherwise use existing + if self.create_project: + self._pulumi_project = self.pulumi_config.create_or_select_project( + self.pulumi_project_name + ) + else: + try: + self._pulumi_project = self.pulumi_config[self.pulumi_project_name] + except (KeyError, 
TypeError) as exc: + msg = f"No SRE named {self.pulumi_project_name} is defined." + raise DataSafeHavenConfigError(msg) from exc + return self._pulumi_project + + @property + def stack(self) -> automation.Stack: + """Load the Pulumi stack, creating if needed.""" + if not self._stack: + self.logger.debug(f"Creating/loading stack [green]{self.stack_name}[/].") + try: + self._stack = automation.create_or_select_stack( + opts=automation.LocalWorkspaceOptions( + env_vars=self.account.env, + project_settings=self.project_settings, + secrets_provider=self.context.pulumi_secrets_provider_url, + stack_settings={self.stack_name: self.stack_settings}, + ), + program=self.program, + project_name=self.project_name, + stack_name=self.stack_name, + ) + self.logger.info(f"Loaded stack [green]{self.stack_name}[/].") + # Ensure encrypted key is stored in the Pulumi configuration + self.update_dsh_pulumi_encrypted_key(self._stack.workspace) + # Ensure workspace plugins are installed + self.install_plugins(self._stack.workspace) + except automation.CommandError as exc: + self.log_exception(exc) + msg = f"Could not load Pulumi stack {self.stack_name}." + raise DataSafeHavenPulumiError(msg) from exc + return self._stack + + def add_option(self, name: str, value: str, *, replace: bool) -> None: + """Add a public configuration option""" + self._options[name] = (value, False, replace) + + def apply_config_options(self) -> None: + """Set Pulumi config options""" + try: + self.logger.debug("Updating Pulumi configuration") + for name, (value, is_secret, replace) in self._options.items(): + if replace: + self.set_config(name, value, secret=is_secret) + else: + self.ensure_config(name, value, secret=is_secret) + self._options = {} + except Exception as exc: + msg = "Applying Pulumi configuration options failed.." + raise DataSafeHavenPulumiError(msg) from exc + + def cancel(self) -> None: + """Cancel ongoing Pulumi operation.""" + try: + self.logger.warning( + f"Cancelling ongoing Pulumi operation for stack [green]{self.stack.name}[/]." + ) + self.stack.cancel() + self.logger.warning( + f"Removing any ambiguous Pulumi resources from stack [green]{self.stack.name}[/]." + ) + self.run_pulumi_command("refresh --clear-pending-creates --yes") + self.logger.warning( + "If you see '[bold]cannot create already existing resource[/]' errors, please manually delete these resources from Azure." + ) + except automation.CommandError: + self.logger.error( + f"No ongoing Pulumi operation found for stack [green]{self.stack.name}[/]." + ) + + def cleanup(self) -> None: + """Cleanup deployed infrastructure.""" + try: + azure_sdk = AzureSdk(self.context.subscription_name) + # Remove stack JSON + try: + self.logger.debug(f"Removing Pulumi stack [green]{self.stack_name}[/].") + if self._stack: + self._stack.workspace.remove_stack(self.stack_name) + self.logger.info( + f"Removed Pulumi stack [green]{self.stack_name}[/]." + ) + except automation.CommandError as exc: + self.log_exception(exc) + if "no stack named" not in str(exc): + msg = "Pulumi stack could not be removed." + raise DataSafeHavenPulumiError(msg) from exc + # Remove stack JSON backup + try: + stack_backup_name = f"{self.stack_name}.json.bak" + self.logger.debug( + f"Removing Pulumi stack backup [green]{stack_backup_name}[/]." 
+ ) + if azure_sdk.blob_exists( + blob_name=f".pulumi/stacks/{self.project_name}/{stack_backup_name}", + resource_group_name=self.context.resource_group_name, + storage_account_name=self.context.storage_account_name, + storage_container_name=self.context.pulumi_storage_container_name, + ): + azure_sdk.remove_blob( + blob_name=f".pulumi/stacks/{self.project_name}/{stack_backup_name}", + resource_group_name=self.context.resource_group_name, + storage_account_name=self.context.storage_account_name, + storage_container_name=self.context.pulumi_storage_container_name, + ) + self.logger.info( + f"Removed Pulumi stack backup [green]{stack_backup_name}[/]." + ) + except DataSafeHavenAzureError as exc: + if "blob does not exist" in str(exc): + self.logger.warning( + f"Pulumi stack backup [green]{stack_backup_name}[/] could not be removed." + ) + else: + msg = "Pulumi stack backup could not be removed." + raise DataSafeHavenPulumiError(msg) from exc + # Purge the key vault, which otherwise blocks re-use of this SRE name + key_vault_name = get_key_vault_name(self.stack_name) + self.logger.debug( + f"Attempting to purge Azure Key Vault [green]{key_vault_name}[/]." + ) + if azure_sdk.purge_keyvault( + key_vault_name, self.program.config.azure.location + ): + self.logger.info(f"Purged Azure Key Vault [green]{key_vault_name}[/].") + except DataSafeHavenError as exc: + msg = "Pulumi destroy failed." + raise DataSafeHavenPulumiError(msg) from exc + + def deploy(self, *, force: bool = False) -> None: + """Deploy the infrastructure with Pulumi.""" + try: + self.apply_config_options() + if force: + self.cancel() + self.refresh() + self.preview() + self.update() + except Exception as exc: + msg = "Pulumi deployment failed." + raise DataSafeHavenPulumiError(msg) from exc + + def destroy(self) -> None: + """Destroy deployed infrastructure.""" + try: + # Note that the first iteration can fail due to failure to delete container NICs + # See https://github.com/MicrosoftDocs/azure-docs/issues/20737 for details + while True: + try: + result = self.stack.destroy( + **self.pulumi_extra_args, + ) + self.evaluate(result.summary.result) + break + except automation.CommandError as exc: + if any( + error in str(exc) + for error in ( + "Linked Service is used by a solution", + "NetworkProfileAlreadyInUseWithContainerNics", + "InUseSubnetCannotBeDeleted", + ) + ): + time.sleep(10) + else: + msg = "Pulumi resource destruction failed." + raise DataSafeHavenPulumiError(msg) from exc + except DataSafeHavenError as exc: + msg = "Pulumi destroy failed." + raise DataSafeHavenPulumiError(msg) from exc + + def ensure_config(self, name: str, value: str, *, secret: bool) -> None: + """Ensure that config values have been set, setting them if they do not exist""" + try: + self.stack.get_config(name) + except automation.CommandError: + self.set_config(name, value, secret=secret) + + def evaluate(self, result: str) -> None: + """Evaluate a Pulumi operation.""" + if result == "succeeded": + self.logger.info("Pulumi operation [green]succeeded[/].") + else: + self.logger.error("Pulumi operation [red]failed[/].") + msg = "Pulumi operation failed." 
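For readers unfamiliar with the Pulumi automation API that `ProjectManager` wraps, here is a minimal, hedged sketch of the same create-or-select / refresh / up sequence with an inline program. The stack and project names are placeholders, it assumes a locally configured Pulumi CLI, and the real code additionally supplies project settings, environment variables and an Azure secrets provider through `LocalWorkspaceOptions`:

from pulumi import automation


def pulumi_program() -> None:
    # Inline program: in this project it is a DeclarativeSRE instance, but any
    # zero-argument callable that declares resources will do
    pass


stack = automation.create_or_select_stack(
    stack_name="example-sre",  # placeholder name
    project_name="data-safe-haven",
    program=pulumi_program,
    opts=automation.LocalWorkspaceOptions(),  # real code sets backend, env vars, secrets provider here
)
stack.refresh(parallel=1, on_output=print)  # parallel=1 mirrors the deadlock workaround above
result = stack.up(on_output=print)
print(result.summary.result)  # "succeeded" when the update went through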
+ raise DataSafeHavenPulumiError(msg) + + def install_plugins(self, workspace: automation.Workspace) -> None: + """For inline programs, we must manage plugins ourselves.""" + try: + self.logger.debug("Installing required Pulumi plugins") + workspace.install_plugin( + "azure-native", metadata.version("pulumi-azure-native") + ) + workspace.install_plugin("random", metadata.version("pulumi-random")) + except Exception as exc: + msg = "Installing Pulumi plugins failed.." + raise DataSafeHavenPulumiError(msg) from exc + + def log_exception(self, exc: automation.CommandError) -> None: + with suppress(IndexError): + stderr = str(exc).split("\n")[3].replace(" stderr: ", "") + self.log_message(f"Pulumi output: {stderr}") + + def log_message(self, message: str) -> None: + return from_ansi(self.logger, message) + + def output(self, name: str) -> Any: + """Get a named output value from a stack""" + if not self._stack_outputs: + self._stack_outputs = self.stack.outputs() + return self._stack_outputs[name].value + + def preview(self) -> None: + """Preview the Pulumi stack.""" + try: + self.logger.info( + f"Previewing changes for stack [green]{self.stack.name}[/]." + ) + with suppress(automation.CommandError): + # Note that we disable parallelisation which can cause deadlock + self.stack.preview( + diff=True, + parallel=1, + **self.pulumi_extra_args, + ) + except Exception as exc: + msg = "Pulumi preview failed.." + raise DataSafeHavenPulumiError(msg) from exc + + def refresh(self) -> None: + """Refresh the Pulumi stack.""" + try: + self.logger.info(f"Refreshing stack [green]{self.stack.name}[/].") + # Note that we disable parallelisation which can cause deadlock + self.stack.refresh(parallel=1, **self.pulumi_extra_args) + except automation.CommandError as exc: + self.log_exception(exc) + msg = "Pulumi refresh failed." + raise DataSafeHavenPulumiError(msg) from exc + + def run_pulumi_command(self, command: str) -> str: + """Run a Pulumi non-interactive CLI command using this project and stack.""" + try: + result = self.stack._run_pulumi_cmd_sync(command.split()) + return str(result.stdout) + except automation.CommandError as exc: + self.log_exception(exc) + msg = f"Failed to run command '{command}'." + raise DataSafeHavenPulumiError(msg) from exc + + def secret(self, name: str) -> str: + """Read a secret from the Pulumi stack.""" + try: + return str(self.stack.get_config(name).value) + except automation.CommandError as exc: + self.log_exception(exc) + msg = f"Secret '{name}' was not found." + raise DataSafeHavenPulumiError(msg) from exc + + def set_config(self, name: str, value: str, *, secret: bool) -> None: + """Set config values, overwriting any existing value.""" + self.stack.set_config(name, automation.ConfigValue(value=value, secret=secret)) + self.update_dsh_pulumi_project() + + def teardown(self, *, force: bool = False) -> None: + """Teardown the infrastructure deployed with Pulumi.""" + try: + if force: + self.cancel() + self.refresh() + self.destroy() + self.cleanup() + except Exception as exc: + self.log_exception(exc) + msg = "Tearing down Pulumi infrastructure failed.." 
+ raise DataSafeHavenPulumiError(msg) from exc + + def update(self) -> None: + """Update deployed infrastructure.""" + try: + self.logger.info(f"Applying changes to stack [green]{self.stack.name}[/].") + result = self.stack.up( + **self.pulumi_extra_args, + ) + self.evaluate(result.summary.result) + self.update_dsh_pulumi_project() + except automation.CommandError as exc: + self.log_exception(exc) + msg = "Pulumi update failed." + raise DataSafeHavenPulumiError(msg) from exc + + def update_dsh_pulumi_project(self) -> None: + """Update persistent data in the DSHPulumiProject object""" + all_config_dict = { + key: item.value for key, item in self.stack.get_all_config().items() + } + self.pulumi_project.stack_config = all_config_dict + + def update_dsh_pulumi_encrypted_key(self, workspace: automation.Workspace) -> None: + """Update encrypted key in the DSHPulumiProject object""" + stack_key = workspace.stack_settings(stack_name=self.stack_name).encrypted_key + + if not self.pulumi_config.encrypted_key: + self.pulumi_config.encrypted_key = stack_key + elif self.pulumi_config.encrypted_key != stack_key: + msg = "Stack encrypted key does not match project encrypted key" + raise DataSafeHavenPulumiError(msg) + + +class SREProjectManager(ProjectManager): + """Interact with an SRE using Pulumi""" + + def __init__( + self, + context: Context, + config: SREConfig, + pulumi_config: DSHPulumiConfig, + *, + create_project: bool = False, + graph_api_token: str | None = None, + ) -> None: + """Constructor""" + token = graph_api_token or "" + super().__init__( + context, + pulumi_config, + config.name, + DeclarativeSRE(context, config, token), + create_project=create_project, + ) diff --git a/data_safe_haven/logging/__init__.py b/data_safe_haven/logging/__init__.py new file mode 100644 index 0000000000..dc44946c66 --- /dev/null +++ b/data_safe_haven/logging/__init__.py @@ -0,0 +1,19 @@ +from .logger import ( + from_ansi, + get_console_handler, + get_logger, + get_null_logger, + init_logging, + set_console_level, + show_console_level, +) + +__all__ = [ + "from_ansi", + "get_console_handler", + "get_logger", + "get_null_logger", + "init_logging", + "set_console_level", + "show_console_level", +] diff --git a/data_safe_haven/logging/logger.py b/data_safe_haven/logging/logger.py new file mode 100644 index 0000000000..fe468f3b70 --- /dev/null +++ b/data_safe_haven/logging/logger.py @@ -0,0 +1,89 @@ +"""Custom logging functions to interact with Python logging.""" + +import logging +from datetime import UTC, datetime + +from rich.highlighter import NullHighlighter +from rich.logging import RichHandler +from rich.text import Text + +from data_safe_haven.directories import log_dir + +from .non_logging_singleton import NonLoggingSingleton +from .plain_file_handler import PlainFileHandler + + +def from_ansi(logger: logging.Logger, text: str) -> None: + logger.info(Text.from_ansi(text)) + + +def get_console_handler() -> RichHandler: + return next(h for h in get_logger().handlers if isinstance(h, RichHandler)) + + +def get_logger() -> logging.Logger: + return logging.getLogger("data_safe_haven") + + +def get_null_logger() -> logging.Logger: + return NonLoggingSingleton() + + +def init_logging() -> None: + # Configure root logger + # By default logging level is WARNING + root_logger = logging.getLogger(None) + root_logger.setLevel(logging.NOTSET) + + # Configure DSH logger + logger = get_logger() + logger.setLevel(logging.NOTSET) + + # Clear existing handlers + for handler in logger.handlers[:]: + 
logger.removeHandler(handler) + + console_handler = RichHandler( + level=logging.INFO, + highlighter=NullHighlighter(), + markup=True, + rich_tracebacks=True, + show_time=False, + show_path=False, + show_level=False, + ) + console_handler.setFormatter(logging.Formatter(r"%(message)s")) + + file_handler = PlainFileHandler( + log_dir() / logfile_name(), + delay=True, + encoding="utf8", + mode="a", + ) + file_handler.setFormatter( + logging.Formatter(r"%(asctime)s - %(levelname)s - %(message)s") + ) + file_handler.setLevel(logging.NOTSET) + + # Add handlers + logger.addHandler(console_handler) + logger.addHandler(file_handler) + + # Disable unnecessarily verbose external logging + logging.getLogger("azure.core.pipeline.policies").setLevel(logging.ERROR) + logging.getLogger("azure.identity._credentials").setLevel(logging.ERROR) + logging.getLogger("azure.identity._internal").setLevel(logging.ERROR) + logging.getLogger("azure.mgmt.core.policies").setLevel(logging.ERROR) + logging.getLogger("urllib3.connectionpool").setLevel(logging.ERROR) + + +def logfile_name() -> str: + return f"{datetime.now(UTC).date()}.log" + + +def set_console_level(level: int | str) -> None: + get_console_handler().setLevel(level) + + +def show_console_level() -> None: + get_console_handler()._log_render.show_level = True diff --git a/data_safe_haven/logging/non_logging_singleton.py b/data_safe_haven/logging/non_logging_singleton.py new file mode 100644 index 0000000000..05ad60dd24 --- /dev/null +++ b/data_safe_haven/logging/non_logging_singleton.py @@ -0,0 +1,14 @@ +import logging + +from data_safe_haven.singleton import Singleton + + +class NonLoggingSingleton(logging.Logger, metaclass=Singleton): + """ + Non-logging singleton that can be used by anything needing logs to be consumed + """ + + def __init__(self) -> None: + super().__init__(name="non-logger", level=logging.CRITICAL + 10) + while self.handlers: + self.removeHandler(self.handlers[0]) diff --git a/data_safe_haven/logging/plain_file_handler.py b/data_safe_haven/logging/plain_file_handler.py new file mode 100644 index 0000000000..c41d0e5ffc --- /dev/null +++ b/data_safe_haven/logging/plain_file_handler.py @@ -0,0 +1,31 @@ +"""Custom logging handler to interact with Python logging.""" + +import logging +from typing import Any + +from rich.text import Text + + +class PlainFileHandler(logging.FileHandler): + """ + Logging handler that cleans messages before sending them to a log file. 
+ """ + + def __init__(self, *args: Any, **kwargs: Any): + """Constructor""" + super().__init__(*args, **kwargs) + + @staticmethod + def strip_formatting(input_string: str) -> str: + """Strip console markup formatting from a string""" + text = Text.from_markup(input_string) + text.spans = [] + return str(text) + + def emit(self, record: logging.LogRecord) -> None: + """Emit a record without formatting""" + if isinstance(record.msg, Text): + # Convert rich.text.Text objects to strings + record.msg = str(record.msg) + record.msg = self.strip_formatting(record.msg) + super().emit(record) diff --git a/data_safe_haven/provisioning/__init__.py b/data_safe_haven/provisioning/__init__.py new file mode 100644 index 0000000000..53186736d6 --- /dev/null +++ b/data_safe_haven/provisioning/__init__.py @@ -0,0 +1,7 @@ +"""Provisioning for deployed Data Safe Haven infrastructure.""" + +from .sre_provisioning_manager import SREProvisioningManager + +__all__ = [ + "SREProvisioningManager", +] diff --git a/data_safe_haven/provisioning/sre_provisioning_manager.py b/data_safe_haven/provisioning/sre_provisioning_manager.py new file mode 100644 index 0000000000..39c0d4dff1 --- /dev/null +++ b/data_safe_haven/provisioning/sre_provisioning_manager.py @@ -0,0 +1,138 @@ +"""Provisioning manager for a deployed SRE.""" + +import pathlib +from typing import Any + +from data_safe_haven.external import ( + AzureContainerInstance, + AzurePostgreSQLDatabase, + AzureSdk, + GraphApi, +) +from data_safe_haven.infrastructure import SREProjectManager +from data_safe_haven.logging import get_logger +from data_safe_haven.types import AzureLocation, AzureSubscriptionName + + +class SREProvisioningManager: + """Provisioning manager for a deployed SRE.""" + + def __init__( + self, + graph_api_token: str, + location: AzureLocation, + sre_name: str, + sre_stack: SREProjectManager, + subscription_name: AzureSubscriptionName, + timezone: str, + ): + self._available_vm_skus: dict[str, dict[str, Any]] | None = None + self.location = location + self.graph_api = GraphApi.from_token(graph_api_token) + self.logger = get_logger() + self.sre_name = sre_name + self.subscription_name = subscription_name + + # Read secrets from key vault + keyvault_name = sre_stack.output("data")["key_vault_name"] + secret_name = sre_stack.output("data")["password_user_database_admin_secret"] + azure_sdk = AzureSdk(self.subscription_name) + connection_db_server_password = azure_sdk.get_keyvault_secret( + keyvault_name, secret_name + ) + + # Construct remote desktop parameters + self.remote_desktop_params = sre_stack.output("remote_desktop") + self.remote_desktop_params["connection_db_server_password"] = ( + connection_db_server_password + ) + self.remote_desktop_params["timezone"] = timezone + + # Construct security group parameters + self.security_group_params = dict(sre_stack.output("ldap")) + + # Construct VM parameters + self.workspaces = {} + for idx, vm in enumerate(sre_stack.output("workspaces")["vm_outputs"], start=1): + self.workspaces[f"Workspace {idx}"] = { + "cpus": int(self.available_vm_skus[vm["sku"]]["vCPUs"]), + "gpus": int(self.available_vm_skus[vm["sku"]]["GPUs"]), + "ip_address": vm["ip_address"], + "name": vm["name"], + "ram": int(self.available_vm_skus[vm["sku"]]["MemoryGB"]), + "sku": vm["sku"], + } + + @property + def available_vm_skus(self) -> dict[str, dict[str, Any]]: + """Load available VM SKUs for this region""" + if not self._available_vm_skus: + azure_sdk = AzureSdk(self.subscription_name) + self._available_vm_skus = 
azure_sdk.list_available_vm_skus(self.location) + return self._available_vm_skus + + def create_security_groups(self) -> None: + """Create groups in Entra ID""" + for group_name in self.security_group_params.values(): + self.graph_api.create_group(group_name) + + def restart_remote_desktop_containers(self) -> None: + """Restart the Guacamole container group""" + guacamole_provisioner = AzureContainerInstance( + self.remote_desktop_params["container_group_name"], + self.remote_desktop_params["resource_group_name"], + self.subscription_name, + ) + guacamole_provisioner.restart() + + def update_remote_desktop_connections(self) -> None: + """Update connection information on the Guacamole PostgreSQL server""" + postgres_provisioner = AzurePostgreSQLDatabase( + self.remote_desktop_params["connection_db_name"], + self.remote_desktop_params["connection_db_server_password"], + self.remote_desktop_params["connection_db_server_name"], + self.remote_desktop_params["resource_group_name"], + self.subscription_name, + ) + connection_data = { + "connections": [ + { + "connection_name": ( + f"{vm_identifier} [{vm_details['cpus']} CPU(s)," + f" {vm_details['gpus']} GPU(s), {vm_details['ram']} GB RAM]" + ), + "disable_copy": self.remote_desktop_params["disable_copy"], + "disable_paste": self.remote_desktop_params["disable_paste"], + "ip_address": vm_details["ip_address"], + "timezone": self.remote_desktop_params["timezone"], + } + for vm_identifier, vm_details in self.workspaces.items() + ], + "system_administrator_group_name": self.security_group_params[ + "admin_group_name" + ], + "user_group_name": self.security_group_params["user_group_name"], + } + for details in connection_data["connections"]: + self.logger.info( + f"Adding connection [bold]{details['connection_name']}[/] at [green]{details['ip_address']}[/]." + ) + postgres_script_path = ( + pathlib.Path(__file__).parent.parent + / "resources" + / "remote_desktop" + / "postgresql" + ) + postgres_provisioner.execute_scripts( + [ + postgres_script_path / "init_db.mustache.sql", + postgres_script_path / "update_connections.mustache.sql", + ], + mustache_values=connection_data, + ) + + def run(self) -> None: + """Apply SRE configuration""" + self.create_security_groups() + self.update_remote_desktop_connections() + self.restart_remote_desktop_containers() diff --git a/data_safe_haven/resources/__init__.py b/data_safe_haven/resources/__init__.py new file mode 100644 index 0000000000..ace81a0a09 --- /dev/null +++ b/data_safe_haven/resources/__init__.py @@ -0,0 +1,5 @@ +import pathlib + +resources_path = pathlib.Path(__file__).parent.resolve() + +__all__ = ["resources_path"] diff --git a/data_safe_haven/resources/dns_server/AdGuardHome.mustache.yaml b/data_safe_haven/resources/dns_server/AdGuardHome.mustache.yaml new file mode 100644 index 0000000000..eef58d5a0a --- /dev/null +++ b/data_safe_haven/resources/dns_server/AdGuardHome.mustache.yaml @@ -0,0 +1,24 @@ +users: + - name: {{admin_username}} + password: {{admin_password_encrypted}} +dns: + blocking_mode: nxdomain + upstream_dns: + - "{{upstream_dns}}" + upstream_dns_file: "" + bootstrap_dns: + - "{{upstream_dns}}" +querylog: + enabled: true +filters: +user_rules: + # https://github.com/AdguardTeam/AdGuardHome/wiki/Hosts-Blocklists#adblock-style-syntax + - "*.*" + {{#filter_allow}} + - "@@||{{.}}" + {{/filter_allow}} +log: + verbose: true +# Note that because we are only providing a partial config file we need the +# `schema_version` key or the full set of YAML migrations will get run. 
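The AdGuardHome file above is a mustache template, presumably rendered with the same tooling (`chevron`) used for the workspace cloud-init template elsewhere in this change. A small illustration of how a `{{#filter_allow}}` section expands over a list of allowed domains; the template fragment and domain values are illustrative only:

import chevron

template = """user_rules:
  - "*.*"
  {{#filter_allow}}
  - "@@||{{.}}"
  {{/filter_allow}}
"""

# Each list entry is substituted for {{.}} inside the section
print(chevron.render(template, {"filter_allow": ["pypi.org", "cran.r-project.org"]}))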
+schema_version: 24 diff --git a/data_safe_haven/resources/dns_server/entrypoint.sh b/data_safe_haven/resources/dns_server/entrypoint.sh new file mode 100644 index 0000000000..d79226ff74 --- /dev/null +++ b/data_safe_haven/resources/dns_server/entrypoint.sh @@ -0,0 +1,17 @@ +#! /usr/bin/env sh + +# Copy the read-only config file to the expected location +CONFIG_TARGET="/opt/adguardhome/conf/AdGuardHome.yaml" +echo "$(date '+%Y/%m/%d %H:%M:%S.000000') [info] Copying configuration file to ${CONFIG_TARGET}." +mkdir -p /opt/adguardhome/conf +cp /opt/adguardhome/custom/AdGuardHome.yaml "$CONFIG_TARGET" + +# Run the usual entrypoint with command line arguments +if [ $# -gt 0 ]; then + echo "$(date '+%Y/%m/%d %H:%M:%S.000000') [info] Running AdGuardHome with arguments: $*." + /opt/adguardhome/AdGuardHome "$@" +else + echo "$(date '+%Y/%m/%d %H:%M:%S.000000') [info] Running AdGuardHome with default arguments." + /opt/adguardhome/AdGuardHome --no-check-update -c /opt/adguardhome/conf/AdGuardHome.yaml -w /opt/adguardhome/work +fi + diff --git a/data_safe_haven/resources/gitea/caddy/Caddyfile b/data_safe_haven/resources/gitea/caddy/Caddyfile new file mode 100644 index 0000000000..0bef301196 --- /dev/null +++ b/data_safe_haven/resources/gitea/caddy/Caddyfile @@ -0,0 +1,14 @@ +# Refer to the Caddy docs for more information: +# https://caddyserver.com/docs/caddyfile +{ + log { + format console { + level_format upper + } + level DEBUG + } +} + +:80 { + reverse_proxy http://localhost:3000 +} diff --git a/data_safe_haven/resources/gitea/gitea/configure.mustache.sh b/data_safe_haven/resources/gitea/gitea/configure.mustache.sh new file mode 100644 index 0000000000..4108c5c9dd --- /dev/null +++ b/data_safe_haven/resources/gitea/gitea/configure.mustache.sh @@ -0,0 +1,22 @@ +#! /usr/bin/env bash + +# Ensure that default admin user exists +until su-exec "$USER" /usr/local/bin/gitea admin user list --admin | grep "{{admin_username}}" > /dev/null 2>&1; do + echo "$(date -Iseconds) Attempting to create default admin user '{{admin_username}}'..." | tee -a /var/log/configuration + su-exec "$USER" /usr/local/bin/gitea admin user create --admin --username "{{admin_username}}" --random-password --random-password-length 20 --email "{{admin_email}}" 2> /dev/null + sleep 1 +done + +# Ensure that LDAP authentication is enabled +until su-exec "$USER" /usr/local/bin/gitea admin auth list | grep "DataSafeHavenLDAP" > /dev/null 2>&1; do + echo "$(date -Iseconds) Attempting to register LDAP authentication..." | tee -a /var/log/configuration + su-exec "$USER" /usr/local/bin/gitea admin auth add-ldap \ + --name DataSafeHavenLDAP \ + --security-protocol "unencrypted" \ + --host "{{ldap_server_hostname}}" \ + --port "{{ldap_server_port}}" \ + --user-search-base "{{ldap_user_search_base}}" \ + --user-filter "(&{{{ldap_user_filter}}}({{ldap_username_attribute}}=%[1]s))" \ + --email-attribute "mail" + sleep 1 +done diff --git a/data_safe_haven/resources/gitea/gitea/entrypoint.sh b/data_safe_haven/resources/gitea/gitea/entrypoint.sh new file mode 100644 index 0000000000..ee58e49337 --- /dev/null +++ b/data_safe_haven/resources/gitea/gitea/entrypoint.sh @@ -0,0 +1,9 @@ +#! 
/usr/bin/env sh + +# Add configuration as an s6 target +mkdir -p /etc/s6/setup +rm /etc/s6/setup/run 2> /dev/null +ln -s /app/custom/configure.sh /etc/s6/setup/run + +# Run the usual entrypoint +/usr/bin/entrypoint diff --git a/data_safe_haven/resources/hedgedoc/caddy/Caddyfile b/data_safe_haven/resources/hedgedoc/caddy/Caddyfile new file mode 100644 index 0000000000..0bef301196 --- /dev/null +++ b/data_safe_haven/resources/hedgedoc/caddy/Caddyfile @@ -0,0 +1,14 @@ +# Refer to the Caddy docs for more information: +# https://caddyserver.com/docs/caddyfile +{ + log { + format console { + level_format upper + } + level DEBUG + } +} + +:80 { + reverse_proxy http://localhost:3000 +} diff --git a/data_safe_haven/resources/hedgedoc/hedgedoc/config.json b/data_safe_haven/resources/hedgedoc/hedgedoc/config.json new file mode 100644 index 0000000000..4ecd8c27fe --- /dev/null +++ b/data_safe_haven/resources/hedgedoc/hedgedoc/config.json @@ -0,0 +1,12 @@ +{ + "production": { + "db": { + "dialectOptions": { + "ssl": { + "require": true, + "rejectUnauthorized": false + } + } + } + } +} diff --git a/data_safe_haven/resources/remote_desktop/caddy/Caddyfile b/data_safe_haven/resources/remote_desktop/caddy/Caddyfile new file mode 100644 index 0000000000..7ef0327056 --- /dev/null +++ b/data_safe_haven/resources/remote_desktop/caddy/Caddyfile @@ -0,0 +1,15 @@ +# Refer to the Caddy docs for more information: +# https://caddyserver.com/docs/caddyfile +{ + log { + format console { + level_format upper + } + level DEBUG + } +} + +:80 { + rewrite / /guacamole + reverse_proxy http://localhost:8080 +} diff --git a/data_safe_haven/resources/remote_desktop/postgresql/init_db.mustache.sql b/data_safe_haven/resources/remote_desktop/postgresql/init_db.mustache.sql new file mode 100644 index 0000000000..0607faded9 --- /dev/null +++ b/data_safe_haven/resources/remote_desktop/postgresql/init_db.mustache.sql @@ -0,0 +1,773 @@ +-- +-- Connection group types +-- + +DO $$ BEGIN + CREATE TYPE guacamole_connection_group_type AS ENUM( + 'ORGANIZATIONAL', + 'BALANCING' + ); +EXCEPTION + WHEN duplicate_object THEN null; +END $$; + +-- +-- Entity types +-- + +DO $$ BEGIN + CREATE TYPE guacamole_entity_type AS ENUM( + 'USER', + 'USER_GROUP' + ); +EXCEPTION + WHEN duplicate_object THEN null; +END $$; + +-- +-- Object permission types +-- + +DO $$ BEGIN + CREATE TYPE guacamole_object_permission_type AS ENUM( + 'READ', + 'UPDATE', + 'DELETE', + 'ADMINISTER' + ); +EXCEPTION + WHEN duplicate_object THEN null; +END $$; + +-- +-- System permission types +-- + +DO $$ BEGIN + CREATE TYPE guacamole_system_permission_type AS ENUM( + 'CREATE_CONNECTION', + 'CREATE_CONNECTION_GROUP', + 'CREATE_SHARING_PROFILE', + 'CREATE_USER', + 'CREATE_USER_GROUP', + 'ADMINISTER' + ); +EXCEPTION + WHEN duplicate_object THEN null; +END $$; + +-- +-- Guacamole proxy (guacd) encryption methods +-- + +DO $$ BEGIN + CREATE TYPE guacamole_proxy_encryption_method AS ENUM( + 'NONE', + 'SSL' + ); +EXCEPTION + WHEN duplicate_object THEN null; +END $$; + +-- +-- Table of connection groups. Each connection group has a name. 
+-- + +CREATE TABLE IF NOT EXISTS guacamole_connection_group ( + + connection_group_id serial NOT NULL, + parent_id integer, + connection_group_name varchar(128) NOT NULL, + type guacamole_connection_group_type + NOT NULL DEFAULT 'ORGANIZATIONAL', + + -- Concurrency limits + max_connections integer, + max_connections_per_user integer, + enable_session_affinity boolean NOT NULL DEFAULT FALSE, + + PRIMARY KEY (connection_group_id), + + CONSTRAINT connection_group_name_parent + UNIQUE (connection_group_name, parent_id), + + CONSTRAINT guacamole_connection_group_ibfk_1 + FOREIGN KEY (parent_id) + REFERENCES guacamole_connection_group (connection_group_id) + ON DELETE CASCADE + +); + +CREATE INDEX IF NOT EXISTS guacamole_connection_group_parent_id + ON guacamole_connection_group(parent_id); + +-- +-- Table of connections. Each connection has a name, protocol, and +-- associated set of parameters. +-- A connection may belong to a connection group. +-- + +CREATE TABLE IF NOT EXISTS guacamole_connection ( + + connection_id serial NOT NULL, + connection_name varchar(128) NOT NULL, + parent_id integer, + protocol varchar(32) NOT NULL, + + -- Concurrency limits + max_connections integer, + max_connections_per_user integer, + + -- Connection Weight + connection_weight integer, + failover_only boolean NOT NULL DEFAULT FALSE, + + -- Guacamole proxy (guacd) overrides + proxy_port integer, + proxy_hostname varchar(512), + proxy_encryption_method guacamole_proxy_encryption_method, + + PRIMARY KEY (connection_id), + + CONSTRAINT connection_name_parent + UNIQUE (connection_name, parent_id), + + CONSTRAINT guacamole_connection_ibfk_1 + FOREIGN KEY (parent_id) + REFERENCES guacamole_connection_group (connection_group_id) + ON DELETE CASCADE + +); + +CREATE INDEX IF NOT EXISTS guacamole_connection_parent_id + ON guacamole_connection(parent_id); + +-- +-- Table of base entities which may each be either a user or user group. Other +-- tables which represent qualities shared by both users and groups will point +-- to guacamole_entity, while tables which represent qualities specific to +-- users or groups will point to guacamole_user or guacamole_user_group. +-- + +CREATE TABLE IF NOT EXISTS guacamole_entity ( + + entity_id serial NOT NULL, + name varchar(128) NOT NULL, + type guacamole_entity_type NOT NULL, + + PRIMARY KEY (entity_id), + + CONSTRAINT guacamole_entity_name_scope + UNIQUE (type, name) + +); + +-- +-- Table of users. Each user has a unique username and a hashed password +-- with corresponding salt. Although the authentication system will always set +-- salted passwords, other systems may set unsalted passwords by simply not +-- providing the salt. 
+-- + +CREATE TABLE IF NOT EXISTS guacamole_user ( + + user_id serial NOT NULL, + entity_id integer NOT NULL, + + -- Optionally-salted password + password_hash bytea NOT NULL, + password_salt bytea, + password_date timestamptz NOT NULL, + + -- Account disabled/expired status + disabled boolean NOT NULL DEFAULT FALSE, + expired boolean NOT NULL DEFAULT FALSE, + + -- Time-based access restriction + access_window_start time, + access_window_end time, + + -- Date-based access restriction + valid_from date, + valid_until date, + + -- Timezone used for all date/time comparisons and interpretation + timezone varchar(64), + + -- Profile information + full_name varchar(256), + email_address varchar(256), + organization varchar(256), + organizational_role varchar(256), + + PRIMARY KEY (user_id), + + CONSTRAINT guacamole_user_single_entity + UNIQUE (entity_id), + + CONSTRAINT guacamole_user_entity + FOREIGN KEY (entity_id) + REFERENCES guacamole_entity (entity_id) + ON DELETE CASCADE + +); + +-- +-- Table of user groups. Each user group may have an arbitrary set of member +-- users and member groups, with those members inheriting the permissions +-- granted to that group. +-- + +CREATE TABLE IF NOT EXISTS guacamole_user_group ( + + user_group_id serial NOT NULL, + entity_id integer NOT NULL, + + -- Group disabled status + disabled boolean NOT NULL DEFAULT FALSE, + + PRIMARY KEY (user_group_id), + + CONSTRAINT guacamole_user_group_single_entity + UNIQUE (entity_id), + + CONSTRAINT guacamole_user_group_entity + FOREIGN KEY (entity_id) + REFERENCES guacamole_entity (entity_id) + ON DELETE CASCADE + +); + +-- +-- Table of users which are members of given user groups. +-- + +CREATE TABLE IF NOT EXISTS guacamole_user_group_member ( + + user_group_id integer NOT NULL, + member_entity_id integer NOT NULL, + + PRIMARY KEY (user_group_id, member_entity_id), + + -- Parent must be a user group + CONSTRAINT guacamole_user_group_member_parent + FOREIGN KEY (user_group_id) + REFERENCES guacamole_user_group (user_group_id) ON DELETE CASCADE, + + -- Member may be either a user or a user group (any entity) + CONSTRAINT guacamole_user_group_member_entity + FOREIGN KEY (member_entity_id) + REFERENCES guacamole_entity (entity_id) ON DELETE CASCADE + +); + +-- +-- Table of sharing profiles. Each sharing profile has a name, associated set +-- of parameters, and a primary connection. The primary connection is the +-- connection that the sharing profile shares, and the parameters dictate the +-- restrictions/features which apply to the user joining the connection via the +-- sharing profile. +-- + +CREATE TABLE IF NOT EXISTS guacamole_sharing_profile ( + + sharing_profile_id serial NOT NULL, + sharing_profile_name varchar(128) NOT NULL, + primary_connection_id integer NOT NULL, + + PRIMARY KEY (sharing_profile_id), + + CONSTRAINT sharing_profile_name_primary + UNIQUE (sharing_profile_name, primary_connection_id), + + CONSTRAINT guacamole_sharing_profile_ibfk_1 + FOREIGN KEY (primary_connection_id) + REFERENCES guacamole_connection (connection_id) + ON DELETE CASCADE + +); + +CREATE INDEX IF NOT EXISTS guacamole_sharing_profile_primary_connection_id + ON guacamole_sharing_profile(primary_connection_id); + +-- +-- Table of connection parameters. Each parameter is simply a name/value pair +-- associated with a connection. 
+-- + +CREATE TABLE IF NOT EXISTS guacamole_connection_parameter ( + + connection_id integer NOT NULL, + parameter_name varchar(128) NOT NULL, + parameter_value varchar(4096) NOT NULL, + + PRIMARY KEY (connection_id,parameter_name), + + CONSTRAINT guacamole_connection_parameter_ibfk_1 + FOREIGN KEY (connection_id) + REFERENCES guacamole_connection (connection_id) ON DELETE CASCADE + +); + +CREATE INDEX IF NOT EXISTS guacamole_connection_parameter_connection_id + ON guacamole_connection_parameter(connection_id); + +-- +-- Table of sharing profile parameters. Each parameter is simply +-- name/value pair associated with a sharing profile. These parameters dictate +-- the restrictions/features which apply to the user joining the associated +-- connection via the sharing profile. +-- + +CREATE TABLE IF NOT EXISTS guacamole_sharing_profile_parameter ( + + sharing_profile_id integer NOT NULL, + parameter_name varchar(128) NOT NULL, + parameter_value varchar(4096) NOT NULL, + + PRIMARY KEY (sharing_profile_id, parameter_name), + + CONSTRAINT guacamole_sharing_profile_parameter_ibfk_1 + FOREIGN KEY (sharing_profile_id) + REFERENCES guacamole_sharing_profile (sharing_profile_id) ON DELETE CASCADE + +); + +CREATE INDEX IF NOT EXISTS guacamole_sharing_profile_parameter_sharing_profile_id + ON guacamole_sharing_profile_parameter(sharing_profile_id); + +-- +-- Table of arbitrary user attributes. Each attribute is simply a name/value +-- pair associated with a user. Arbitrary attributes are defined by other +-- extensions. Attributes defined by this extension will be mapped to +-- properly-typed columns of a specific table. +-- + +CREATE TABLE IF NOT EXISTS guacamole_user_attribute ( + + user_id integer NOT NULL, + attribute_name varchar(128) NOT NULL, + attribute_value varchar(4096) NOT NULL, + + PRIMARY KEY (user_id, attribute_name), + + CONSTRAINT guacamole_user_attribute_ibfk_1 + FOREIGN KEY (user_id) + REFERENCES guacamole_user (user_id) ON DELETE CASCADE + +); + +CREATE INDEX IF NOT EXISTS guacamole_user_attribute_user_id + ON guacamole_user_attribute(user_id); + +-- +-- Table of arbitrary user group attributes. Each attribute is simply a +-- name/value pair associated with a user group. Arbitrary attributes are +-- defined by other extensions. Attributes defined by this extension will be +-- mapped to properly-typed columns of a specific table. +-- + +CREATE TABLE IF NOT EXISTS guacamole_user_group_attribute ( + + user_group_id integer NOT NULL, + attribute_name varchar(128) NOT NULL, + attribute_value varchar(4096) NOT NULL, + + PRIMARY KEY (user_group_id, attribute_name), + + CONSTRAINT guacamole_user_group_attribute_ibfk_1 + FOREIGN KEY (user_group_id) + REFERENCES guacamole_user_group (user_group_id) ON DELETE CASCADE + +); + +CREATE INDEX IF NOT EXISTS guacamole_user_group_attribute_user_group_id + ON guacamole_user_group_attribute(user_group_id); + +-- +-- Table of arbitrary connection attributes. Each attribute is simply a +-- name/value pair associated with a connection. Arbitrary attributes are +-- defined by other extensions. Attributes defined by this extension will be +-- mapped to properly-typed columns of a specific table. 
+-- + +CREATE TABLE IF NOT EXISTS guacamole_connection_attribute ( + + connection_id integer NOT NULL, + attribute_name varchar(128) NOT NULL, + attribute_value varchar(4096) NOT NULL, + + PRIMARY KEY (connection_id, attribute_name), + + CONSTRAINT guacamole_connection_attribute_ibfk_1 + FOREIGN KEY (connection_id) + REFERENCES guacamole_connection (connection_id) ON DELETE CASCADE + +); + +CREATE INDEX IF NOT EXISTS guacamole_connection_attribute_connection_id + ON guacamole_connection_attribute(connection_id); + +-- +-- Table of arbitrary connection group attributes. Each attribute is simply a +-- name/value pair associated with a connection group. Arbitrary attributes are +-- defined by other extensions. Attributes defined by this extension will be +-- mapped to properly-typed columns of a specific table. +-- + +CREATE TABLE IF NOT EXISTS guacamole_connection_group_attribute ( + + connection_group_id integer NOT NULL, + attribute_name varchar(128) NOT NULL, + attribute_value varchar(4096) NOT NULL, + + PRIMARY KEY (connection_group_id, attribute_name), + + CONSTRAINT guacamole_connection_group_attribute_ibfk_1 + FOREIGN KEY (connection_group_id) + REFERENCES guacamole_connection_group (connection_group_id) ON DELETE CASCADE + +); + +CREATE INDEX IF NOT EXISTS guacamole_connection_group_attribute_connection_group_id + ON guacamole_connection_group_attribute(connection_group_id); + +-- +-- Table of arbitrary sharing profile attributes. Each attribute is simply a +-- name/value pair associated with a sharing profile. Arbitrary attributes are +-- defined by other extensions. Attributes defined by this extension will be +-- mapped to properly-typed columns of a specific table. +-- + +CREATE TABLE IF NOT EXISTS guacamole_sharing_profile_attribute ( + + sharing_profile_id integer NOT NULL, + attribute_name varchar(128) NOT NULL, + attribute_value varchar(4096) NOT NULL, + + PRIMARY KEY (sharing_profile_id, attribute_name), + + CONSTRAINT guacamole_sharing_profile_attribute_ibfk_1 + FOREIGN KEY (sharing_profile_id) + REFERENCES guacamole_sharing_profile (sharing_profile_id) ON DELETE CASCADE + +); + +CREATE INDEX IF NOT EXISTS guacamole_sharing_profile_attribute_sharing_profile_id + ON guacamole_sharing_profile_attribute(sharing_profile_id); + +-- +-- Table of connection permissions. Each connection permission grants a user or +-- user group specific access to a connection. +-- + +CREATE TABLE IF NOT EXISTS guacamole_connection_permission ( + + entity_id integer NOT NULL, + connection_id integer NOT NULL, + permission guacamole_object_permission_type NOT NULL, + + PRIMARY KEY (entity_id, connection_id, permission), + + CONSTRAINT guacamole_connection_permission_ibfk_1 + FOREIGN KEY (connection_id) + REFERENCES guacamole_connection (connection_id) ON DELETE CASCADE, + + CONSTRAINT guacamole_connection_permission_entity + FOREIGN KEY (entity_id) + REFERENCES guacamole_entity (entity_id) ON DELETE CASCADE + +); + +CREATE INDEX IF NOT EXISTS guacamole_connection_permission_connection_id + ON guacamole_connection_permission(connection_id); + +CREATE INDEX IF NOT EXISTS guacamole_connection_permission_entity_id + ON guacamole_connection_permission(entity_id); + +-- +-- Table of connection group permissions. Each group permission grants a user +-- or user group specific access to a connection group. 
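-- Illustrative query (not part of the schema): permissions granted through this
-- table can be listed per user or group by joining back to guacamole_entity,
-- for example
--
--   SELECT e.name, e.type, p.connection_group_id, p.permission
--   FROM guacamole_connection_group_permission p
--   JOIN guacamole_entity e ON e.entity_id = p.entity_id;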
+-- + +CREATE TABLE IF NOT EXISTS guacamole_connection_group_permission ( + + entity_id integer NOT NULL, + connection_group_id integer NOT NULL, + permission guacamole_object_permission_type NOT NULL, + + PRIMARY KEY (entity_id, connection_group_id, permission), + + CONSTRAINT guacamole_connection_group_permission_ibfk_1 + FOREIGN KEY (connection_group_id) + REFERENCES guacamole_connection_group (connection_group_id) ON DELETE CASCADE, + + CONSTRAINT guacamole_connection_group_permission_entity + FOREIGN KEY (entity_id) + REFERENCES guacamole_entity (entity_id) ON DELETE CASCADE + +); + +CREATE INDEX IF NOT EXISTS guacamole_connection_group_permission_connection_group_id + ON guacamole_connection_group_permission(connection_group_id); + +CREATE INDEX IF NOT EXISTS guacamole_connection_group_permission_entity_id + ON guacamole_connection_group_permission(entity_id); + +-- +-- Table of sharing profile permissions. Each sharing profile permission grants +-- a user or user group specific access to a sharing profile. +-- + +CREATE TABLE IF NOT EXISTS guacamole_sharing_profile_permission ( + + entity_id integer NOT NULL, + sharing_profile_id integer NOT NULL, + permission guacamole_object_permission_type NOT NULL, + + PRIMARY KEY (entity_id, sharing_profile_id, permission), + + CONSTRAINT guacamole_sharing_profile_permission_ibfk_1 + FOREIGN KEY (sharing_profile_id) + REFERENCES guacamole_sharing_profile (sharing_profile_id) ON DELETE CASCADE, + + CONSTRAINT guacamole_sharing_profile_permission_entity + FOREIGN KEY (entity_id) + REFERENCES guacamole_entity (entity_id) ON DELETE CASCADE + +); + +CREATE INDEX IF NOT EXISTS guacamole_sharing_profile_permission_sharing_profile_id + ON guacamole_sharing_profile_permission(sharing_profile_id); + +CREATE INDEX IF NOT EXISTS guacamole_sharing_profile_permission_entity_id + ON guacamole_sharing_profile_permission(entity_id); + +-- +-- Table of system permissions. Each system permission grants a user or user +-- group a system-level privilege of some kind. +-- + +CREATE TABLE IF NOT EXISTS guacamole_system_permission ( + + entity_id integer NOT NULL, + permission guacamole_system_permission_type NOT NULL, + + PRIMARY KEY (entity_id, permission), + + CONSTRAINT guacamole_system_permission_entity + FOREIGN KEY (entity_id) + REFERENCES guacamole_entity (entity_id) ON DELETE CASCADE + +); + +CREATE INDEX IF NOT EXISTS guacamole_system_permission_entity_id + ON guacamole_system_permission(entity_id); + +-- +-- Table of user permissions. Each user permission grants a user or user group +-- access to another user (the "affected" user) for a specific type of +-- operation. +-- + +CREATE TABLE IF NOT EXISTS guacamole_user_permission ( + + entity_id integer NOT NULL, + affected_user_id integer NOT NULL, + permission guacamole_object_permission_type NOT NULL, + + PRIMARY KEY (entity_id, affected_user_id, permission), + + CONSTRAINT guacamole_user_permission_ibfk_1 + FOREIGN KEY (affected_user_id) + REFERENCES guacamole_user (user_id) ON DELETE CASCADE, + + CONSTRAINT guacamole_user_permission_entity + FOREIGN KEY (entity_id) + REFERENCES guacamole_entity (entity_id) ON DELETE CASCADE + +); + +CREATE INDEX IF NOT EXISTS guacamole_user_permission_affected_user_id + ON guacamole_user_permission(affected_user_id); + +CREATE INDEX IF NOT EXISTS guacamole_user_permission_entity_id + ON guacamole_user_permission(entity_id); + +-- +-- Table of user group permissions. 
Each user group permission grants a user +-- or user group access to a another user group (the "affected" user group) for +-- a specific type of operation. +-- + +CREATE TABLE IF NOT EXISTS guacamole_user_group_permission ( + + entity_id integer NOT NULL, + affected_user_group_id integer NOT NULL, + permission guacamole_object_permission_type NOT NULL, + + PRIMARY KEY (entity_id, affected_user_group_id, permission), + + CONSTRAINT guacamole_user_group_permission_affected_user_group + FOREIGN KEY (affected_user_group_id) + REFERENCES guacamole_user_group (user_group_id) ON DELETE CASCADE, + + CONSTRAINT guacamole_user_group_permission_entity + FOREIGN KEY (entity_id) + REFERENCES guacamole_entity (entity_id) ON DELETE CASCADE + +); + +CREATE INDEX IF NOT EXISTS guacamole_user_group_permission_affected_user_group_id + ON guacamole_user_group_permission(affected_user_group_id); + +CREATE INDEX IF NOT EXISTS guacamole_user_group_permission_entity_id + ON guacamole_user_group_permission(entity_id); + +-- +-- Table of connection history records. Each record defines a specific user's +-- session, including the connection used, the start time, and the end time +-- (if any). +-- + +CREATE TABLE IF NOT EXISTS guacamole_connection_history ( + + history_id serial NOT NULL, + user_id integer DEFAULT NULL, + username varchar(128) NOT NULL, + remote_host varchar(256) DEFAULT NULL, + connection_id integer DEFAULT NULL, + connection_name varchar(128) NOT NULL, + sharing_profile_id integer DEFAULT NULL, + sharing_profile_name varchar(128) DEFAULT NULL, + start_date timestamptz NOT NULL, + end_date timestamptz DEFAULT NULL, + + PRIMARY KEY (history_id), + + CONSTRAINT guacamole_connection_history_ibfk_1 + FOREIGN KEY (user_id) + REFERENCES guacamole_user (user_id) ON DELETE SET NULL, + + CONSTRAINT guacamole_connection_history_ibfk_2 + FOREIGN KEY (connection_id) + REFERENCES guacamole_connection (connection_id) ON DELETE SET NULL, + + CONSTRAINT guacamole_connection_history_ibfk_3 + FOREIGN KEY (sharing_profile_id) + REFERENCES guacamole_sharing_profile (sharing_profile_id) ON DELETE SET NULL + +); + +CREATE INDEX IF NOT EXISTS guacamole_connection_history_user_id + ON guacamole_connection_history(user_id); + +CREATE INDEX IF NOT EXISTS guacamole_connection_history_connection_id + ON guacamole_connection_history(connection_id); + +CREATE INDEX IF NOT EXISTS guacamole_connection_history_sharing_profile_id + ON guacamole_connection_history(sharing_profile_id); + +CREATE INDEX IF NOT EXISTS guacamole_connection_history_start_date + ON guacamole_connection_history(start_date); + +CREATE INDEX IF NOT EXISTS guacamole_connection_history_end_date + ON guacamole_connection_history(end_date); + +CREATE INDEX IF NOT EXISTS guacamole_connection_history_connection_id_start_date + ON guacamole_connection_history(connection_id, start_date); + +-- +-- User login/logout history +-- + +CREATE TABLE IF NOT EXISTS guacamole_user_history ( + + history_id serial NOT NULL, + user_id integer DEFAULT NULL, + username varchar(128) NOT NULL, + remote_host varchar(256) DEFAULT NULL, + start_date timestamptz NOT NULL, + end_date timestamptz DEFAULT NULL, + + PRIMARY KEY (history_id), + + CONSTRAINT guacamole_user_history_ibfk_1 + FOREIGN KEY (user_id) + REFERENCES guacamole_user (user_id) ON DELETE SET NULL + +); + +CREATE INDEX IF NOT EXISTS guacamole_user_history_user_id + ON guacamole_user_history(user_id); + +CREATE INDEX IF NOT EXISTS guacamole_user_history_start_date + ON guacamole_user_history(start_date); + +CREATE INDEX IF 
NOT EXISTS guacamole_user_history_end_date + ON guacamole_user_history(end_date); + +CREATE INDEX IF NOT EXISTS guacamole_user_history_user_id_start_date + ON guacamole_user_history(user_id, start_date); + +-- +-- User password history +-- + +CREATE TABLE IF NOT EXISTS guacamole_user_password_history ( + + password_history_id serial NOT NULL, + user_id integer NOT NULL, + + -- Salted password + password_hash bytea NOT NULL, + password_salt bytea, + password_date timestamptz NOT NULL, + + PRIMARY KEY (password_history_id), + + CONSTRAINT guacamole_user_password_history_ibfk_1 + FOREIGN KEY (user_id) + REFERENCES guacamole_user (user_id) ON DELETE CASCADE + +); + +CREATE INDEX IF NOT EXISTS guacamole_user_password_history_user_id + ON guacamole_user_password_history(user_id); + +-- +-- Set up user groups +-- + +-- Create user group entities +INSERT INTO + guacamole_entity (name, type) +VALUES + ('{{system_administrator_group_name}}', 'USER_GROUP') +ON CONFLICT DO NOTHING; + +-- Create user groups +INSERT INTO + guacamole_user_group (entity_id) +SELECT entity_id +FROM + guacamole_entity +WHERE + guacamole_entity.type = 'USER_GROUP' +ON CONFLICT DO NOTHING; + +-- Grant administration permissions to members of the system administrators group +INSERT INTO guacamole_system_permission (entity_id, permission) +SELECT entity_id, permission :: guacamole_system_permission_type +FROM + ( + VALUES + ('{{system_administrator_group_name}}', 'CREATE_CONNECTION'), + ('{{system_administrator_group_name}}', 'CREATE_CONNECTION_GROUP'), + ('{{system_administrator_group_name}}', 'CREATE_SHARING_PROFILE'), + ('{{system_administrator_group_name}}', 'CREATE_USER'), + ('{{system_administrator_group_name}}', 'CREATE_USER_GROUP'), + ('{{system_administrator_group_name}}', 'ADMINISTER') + ) group_permissions (username, permission) + JOIN guacamole_entity ON group_permissions.username = guacamole_entity.name AND guacamole_entity.type = 'USER_GROUP' +ON CONFLICT DO NOTHING; diff --git a/data_safe_haven/resources/remote_desktop/postgresql/list_users.mustache.sql b/data_safe_haven/resources/remote_desktop/postgresql/list_users.mustache.sql new file mode 100644 index 0000000000..b15756796f --- /dev/null +++ b/data_safe_haven/resources/remote_desktop/postgresql/list_users.mustache.sql @@ -0,0 +1,10 @@ +SELECT + entity_user.name, + guacamole_user.email_address +FROM guacamole_user_group_member + JOIN guacamole_user_group ON guacamole_user_group_member.user_group_id = guacamole_user_group.user_group_id + JOIN guacamole_entity AS entity_group ON entity_group.entity_id = guacamole_user_group.entity_id + JOIN guacamole_user ON guacamole_user.entity_id = guacamole_user_group_member.member_entity_id + JOIN guacamole_entity AS entity_user ON entity_user.entity_id = guacamole_user.entity_id +WHERE + entity_group.name = '{{group_name}}'; \ No newline at end of file diff --git a/data_safe_haven/resources/remote_desktop/postgresql/update_connections.mustache.sql b/data_safe_haven/resources/remote_desktop/postgresql/update_connections.mustache.sql new file mode 100644 index 0000000000..a46eed04e9 --- /dev/null +++ b/data_safe_haven/resources/remote_desktop/postgresql/update_connections.mustache.sql @@ -0,0 +1,54 @@ +-- Require that connection names are unique +ALTER TABLE guacamole_connection DROP CONSTRAINT IF EXISTS connection_name_constraint; +ALTER TABLE guacamole_connection ADD CONSTRAINT connection_name_constraint UNIQUE (connection_name); + +-- Remove all connections (NB. 
this will cascade delete guacamole_connection_parameter entries) +TRUNCATE guacamole_connection CASCADE; + +-- Add entries for RDP and ssh for each specified connection +{{#connections}} +INSERT INTO + guacamole_connection (connection_name, protocol) +VALUES + ('Desktop: {{connection_name}}', 'rdp'), + ('SSH: {{connection_name}}', 'ssh') +ON CONFLICT DO NOTHING; +{{/connections}} + +-- Add connection details +{{#connections}} +INSERT INTO + guacamole_connection_parameter (connection_id, parameter_name, parameter_value) +SELECT + connection_id, + parameter_name, + parameter_value +FROM + ( + VALUES + ('clipboard-encoding', 'UTF-8'), + ('disable-copy', '{{disable_copy}}'), + ('disable-paste', '{{disable_paste}}'), + ('hostname', '{{ip_address}}'), + ('server-layout', 'en-gb-qwerty'), + ('timezone', '{{timezone}}') + ) connection_settings (parameter_name, parameter_value) + JOIN guacamole_connection ON guacamole_connection.connection_name LIKE '% {{connection_name}}' +ON CONFLICT DO NOTHING; +{{/connections}} + +-- Grant appropriate connection permissions to each group +INSERT INTO guacamole_connection_permission (entity_id, connection_id, permission) + SELECT entity_id, connection_id, permission::guacamole_object_permission_type + FROM + ( + VALUES + ('{{system_administrator_group_name}}', 'READ'), + ('{{system_administrator_group_name}}', 'UPDATE'), + ('{{system_administrator_group_name}}', 'DELETE'), + ('{{system_administrator_group_name}}', 'ADMINISTER'), + ('{{user_group_name}}', 'READ') + ) group_permissions (username, permission) + CROSS JOIN guacamole_connection + JOIN guacamole_entity ON group_permissions.username = guacamole_entity.name +ON CONFLICT DO NOTHING; diff --git a/data_safe_haven/resources/software_repositories/allowlists/cran.allowlist b/data_safe_haven/resources/software_repositories/allowlists/cran.allowlist new file mode 100644 index 0000000000..9624ec7060 --- /dev/null +++ b/data_safe_haven/resources/software_repositories/allowlists/cran.allowlist @@ -0,0 +1,24 @@ +DBI +MASS +RPostgres +Rcpp +bit +bit64 +blob +cli +cli +cpp11 +data.table +generics +glue +hms +lifecycle +lubridate +odbc +pkgconfig +plogr +rlang +timechange +vctrs +withr + diff --git a/data_safe_haven/resources/software_repositories/allowlists/pypi.allowlist b/data_safe_haven/resources/software_repositories/allowlists/pypi.allowlist new file mode 100644 index 0000000000..704937893f --- /dev/null +++ b/data_safe_haven/resources/software_repositories/allowlists/pypi.allowlist @@ -0,0 +1,21 @@ +backports.zoneinfo +contourpy +cycler +fonttools +kiwisolver +matplotlib +numpy +packaging +pandas +pillow +psycopg +psycopg-binary +pymssql +pyodbc +pyparsing +python-dateutil +pytz +scikit-learn +six +typing-extensions +tzdata diff --git a/data_safe_haven/resources/software_repositories/caddy/Caddyfile b/data_safe_haven/resources/software_repositories/caddy/Caddyfile new file mode 100644 index 0000000000..ee134d83eb --- /dev/null +++ b/data_safe_haven/resources/software_repositories/caddy/Caddyfile @@ -0,0 +1,17 @@ +# Refer to the Caddy docs for more information: +# https://caddyserver.com/docs/caddyfile +{ + log { + format console { + level_format upper + } + level DEBUG + } +} + +:80 { + reverse_proxy http://localhost:8081 { + @error status 401 + replace_status @error 403 + } +} diff --git a/data_safe_haven/resources/update_server/update_server_linux.cloud_init.yaml b/data_safe_haven/resources/update_server/update_server_linux.cloud_init.yaml new file mode 100644 index 0000000000..c30388d13a --- /dev/null +++ 
b/data_safe_haven/resources/update_server/update_server_linux.cloud_init.yaml @@ -0,0 +1,33 @@ +#cloud-config + +write_files: + - path: "/etc/squid-deb-proxy/mirror-dstdomain.acl.d/20-data-safe-haven" + content: | + # Additional mirror domains that are allowed by this cache + apt.postgresql.org + azure.archive.ubuntu.com + d20rj4el6vkp4c.cloudfront.net + dbeaver.io + developer.download.nvidia.com + packages.gitlab.com + packages.microsoft.com + qgis.org + ubuntu.qgis.org + +# Install necessary apt packages +packages: + - auditd + - clamav + - clamav-base + - clamav-daemon + - clamav-freshclam + - clamav-unofficial-sigs + - squid-deb-proxy +package_update: true +package_upgrade: true + +runcmd: + # Remove the unnecessary squid service and prevent it from running + - service squid stop + - systemctl disable -f squid + - update-rc.d -f squid remove diff --git a/data_safe_haven/resources/workspace/ansible/desired_state.yaml b/data_safe_haven/resources/workspace/ansible/desired_state.yaml new file mode 100644 index 0000000000..ff5df392a3 --- /dev/null +++ b/data_safe_haven/resources/workspace/ansible/desired_state.yaml @@ -0,0 +1,326 @@ +--- +- name: Desired state configuration + hosts: localhost + become: true + + tasks: + - name: Update package cache + tags: apt + ansible.builtin.apt: + update_cache: true + cache_valid_time: 600 + + - name: List apt packages to install + tags: apt + ansible.builtin.debug: + msg: "{{ apt_packages.common | union(apt_packages[ansible_facts.distribution_release]) }}" + + - name: Install apt packages + tags: apt + ansible.builtin.apt: + name: "{{ apt_packages.common | union(apt_packages[ansible_facts.distribution_release]) }}" + state: present + async: 3600 + poll: 30 + + - name: Install deb packages + tags: apt + ansible.builtin.script: + executable: /bin/bash + cmd: /desired_state/install_deb.sh "{{ item.source }}" "{{ item.filename }}" "{{ item.sha256 }}" + creates: "{{ item.creates }}" + loop: "{{ deb_packages[ansible_facts.distribution_release] }}" + + - name: Install snap packages + community.general.snap: + name: "{{ item.name }}" + classic: "{{ item.classic }}" + state: present + loop: "{{ snap_packages }}" + + # https://ubuntu.com/server/docs/nvidia-drivers-installation#installing-the-drivers-on-servers-andor-for-computing-purposes + - name: Use ubuntu-drivers to install Nvidia drivers # noqa: no-handler + tags: nvidia + ansible.builtin.command: + cmd: ubuntu-drivers install --gpgpu + creates: /usr/bin/nvidia-smi + + - name: Disable and stop Ubuntu Pro services + ansible.builtin.systemd: + name: "{{ item }}" + state: stopped + enabled: false + loop: + - apt-news + - esm-cache + + - name: Enable bash autocompletion globally + ansible.builtin.blockinfile: + path: /etc/bash.bashrc + block: | + # enable bash completion in interactive shells + if [ ! $(shopt -oq posix) ]; then + if [ -f /usr/share/bash-completion/bash_completion ]; then + . /usr/share/bash-completion/bash_completion + elif [ -f /etc/bash_completion ]; then + . /etc/bash_completion + fi + fi + + - name: Copy bashrc skeleton + ansible.builtin.copy: + src: etc/skel/bashrc + dest: /etc/skel/.bashrc + mode: '0755' + + - name: Copy xsession skeleton + ansible.builtin.copy: + src: etc/skel/xsession + dest: /etc/skel/.xsession + mode: '0444' + + - name: Add ldap to /etc/nsswitch.conf + ansible.builtin.replace: + path: /etc/nsswitch.conf + regexp: '^(passwd|group|shadow)(:.*)(? 
/etc/audit/rules.d/50-privileged.rules + creates: /etc/audit/rules.d/50-privileged.rules + notify: Restart auditd + + - name: Copy ClamAV configuration + ansible.builtin.copy: + src: etc/clamav/clamd.conf + dest: /etc/clamav/clamd.conf + mode: '0444' + owner: clamav + group: adm + + - name: Set freshclam private mirror + ansible.builtin.lineinfile: + path: /etc/clamav/freshclam.conf + line: "{{ lookup('file', '/etc/clamav/freshclam-mirror.conf') }}" + state: present + + # This is required to fetch definitions for the clamav daemon to run + - name: Initial freshclam run # noqa: command-instead-of-module + ansible.builtin.shell: + cmd: | + systemctl stop clamav-freshclam && freshclam && systemctl start clamav-freshclam + creates: '/var/lib/clamav/main.{c[vl]d,inc}' + + - name: Install ClamAV services and timer + ansible.builtin.copy: + src: "{{ item }}" + dest: /etc/systemd/system/ + mode: '0644' + with_fileglob: 'etc/systemd/system/clamav*' + notify: Systemd daemon reload + + - name: Enable and start freshclam + ansible.builtin.systemd: + name: clamav-freshclam + state: started + enabled: true + + - name: Enable and start ClamAV daemon + ansible.builtin.systemd: + name: clamav-daemon + enabled: true + state: started + + - name: Enable and start ClamAV on access scan + ansible.builtin.systemd: + name: clamav-clamonacc + enabled: true + state: started + + - name: Enable and start ClamAV timer + ansible.builtin.systemd: + name: clamav-clamdscan.timer + enabled: true + state: started + + - name: Copy smoke test files + ansible.builtin.copy: + src: "{{ item }}" + dest: /usr/local/smoke_tests/ + mode: '0755' + with_fileglob: 'usr/local/smoke_tests/*' + + + handlers: + - name: Restart auditd + ansible.builtin.systemd: + name: auditd + state: restarted + + - name: Restart sshd + ansible.builtin.systemd: + name: sshd + state: restarted + + - name: Update PAM auth # noqa: no-changed-when + ansible.builtin.command: + cmd: pam-auth-update --enable mkhomedir ldap + + # Run systemd daemon-reload. + # https://www.freedesktop.org/software/systemd/man/systemctl.html#daemon-reload + # Should be called when changes are made to .service or .timer files + - name: Systemd daemon reload + ansible.builtin.systemd: + daemon_reload: true diff --git a/data_safe_haven/resources/workspace/ansible/files/etc/clamav/clamd.conf b/data_safe_haven/resources/workspace/ansible/files/etc/clamav/clamd.conf new file mode 100644 index 0000000000..86605227fa --- /dev/null +++ b/data_safe_haven/resources/workspace/ansible/files/etc/clamav/clamd.conf @@ -0,0 +1,12 @@ +# Path to a local socket file the daemon will listen on. +LocalSocket /tmp/clamd.socket +# Sets the permissions on the unix socket to the specified mode. 
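# (Mode 660 allows read/write for the socket's owner and group only; other
# users cannot talk to the daemon directly.)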
+LocalSocketMode 660 +# Prevent access to infected files for normal users +OnAccessExcludeUname clamav +OnAccessExcludeRootUID yes +OnAccessIncludePath /data +OnAccessIncludePath /home +OnAccessIncludePath /output +OnAccessIncludePath /shared +OnAccessPrevention yes diff --git a/data_safe_haven/resources/workspace/ansible/files/etc/polkit-1/localauthority/50-local.d/50-colord.pkla b/data_safe_haven/resources/workspace/ansible/files/etc/polkit-1/localauthority/50-local.d/50-colord.pkla new file mode 100644 index 0000000000..ebb66f5c35 --- /dev/null +++ b/data_safe_haven/resources/workspace/ansible/files/etc/polkit-1/localauthority/50-local.d/50-colord.pkla @@ -0,0 +1,6 @@ +[Allow colord for all users] +Identity=unix-user:* +Action=org.freedesktop.color-manager.create-device;org.freedesktop.color-manager.create-profile;org.freedesktop.color-manager.delete-device;org.freedesktop.color-manager.delete-profile;org.freedesktop.color-manager.modify-device;org.freedesktop.color-manager.modify-profile;org.freedesktop.packagekit.system-sources-refresh +ResultAny=yes +ResultInactive=yes +ResultActive=yes diff --git a/data_safe_haven/resources/workspace/ansible/files/etc/skel/bashrc b/data_safe_haven/resources/workspace/ansible/files/etc/skel/bashrc new file mode 100644 index 0000000000..458c4f81e8 --- /dev/null +++ b/data_safe_haven/resources/workspace/ansible/files/etc/skel/bashrc @@ -0,0 +1,46 @@ +# ~/.bashrc: executed by bash(1) for non-login shells. +# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc) +# for examples + +# If not running interactively, don't do anything +case $- in + *i*) ;; + *) return;; +esac + +# don't put duplicate lines or lines starting with space in the history. +# See bash(1) for more options +HISTCONTROL=ignoreboth + +# append to the history file, don't overwrite it +shopt -s histappend + +# for setting history length see HISTSIZE and HISTFILESIZE in bash(1) +HISTSIZE=1000 +HISTFILESIZE=2000 + +# check the window size after each command and, if necessary, +# update the values of LINES and COLUMNS. 
+shopt -s checkwinsize + +# determine if terminal has color support +case "$TERM" in + xterm-color|*-256color) color_prompt=yes;; +esac + +# set prompt +if [ "$color_prompt" = yes ]; then + PS1='\[\033[01;32m\]\u\[\033[00m\]:\[\033[01;34m\]\W\[\033[00m\]\$ ' +else + PS1='\u:\W\$ ' +fi +unset color_prompt + +# enable color support for ls and grep +if [ -x /usr/bin/dircolors ]; then + test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)" + alias ls='ls --color=auto' + alias grep='grep --color=auto' + alias fgrep='fgrep --color=auto' + alias egrep='egrep --color=auto' +fi diff --git a/data_safe_haven/resources/workspace/ansible/files/etc/skel/xsession b/data_safe_haven/resources/workspace/ansible/files/etc/skel/xsession new file mode 100644 index 0000000000..689630b2e5 --- /dev/null +++ b/data_safe_haven/resources/workspace/ansible/files/etc/skel/xsession @@ -0,0 +1 @@ +xfce4-session diff --git a/deployment/common/resources/clamav-clamdscan.service b/data_safe_haven/resources/workspace/ansible/files/etc/systemd/system/clamav-clamdscan.service similarity index 72% rename from deployment/common/resources/clamav-clamdscan.service rename to data_safe_haven/resources/workspace/ansible/files/etc/systemd/system/clamav-clamdscan.service index 688d744b37..f54f75cb49 100644 --- a/deployment/common/resources/clamav-clamdscan.service +++ b/data_safe_haven/resources/workspace/ansible/files/etc/systemd/system/clamav-clamdscan.service @@ -6,4 +6,4 @@ After=clamav-daemon.service [Service] Type=oneshot User=root -ExecStart=/usr/bin/clamdscan --fdpass --multiscan / \ No newline at end of file +ExecStart=/usr/bin/clamdscan --fdpass --multiscan / diff --git a/data_safe_haven/resources/workspace/ansible/files/etc/systemd/system/clamav-clamdscan.timer b/data_safe_haven/resources/workspace/ansible/files/etc/systemd/system/clamav-clamdscan.timer new file mode 100644 index 0000000000..841f2c2fc4 --- /dev/null +++ b/data_safe_haven/resources/workspace/ansible/files/etc/systemd/system/clamav-clamdscan.timer @@ -0,0 +1,10 @@ +[Unit] +Description=Run ClamAV scan every day + +[Timer] +OnCalendar=daily +RandomizedDelaySec=1h +Persistent=true + +[Install] +WantedBy=timers.target diff --git a/deployment/common/resources/clamav-clamonacc.service b/data_safe_haven/resources/workspace/ansible/files/etc/systemd/system/clamav-clamonacc.service similarity index 92% rename from deployment/common/resources/clamav-clamonacc.service rename to data_safe_haven/resources/workspace/ansible/files/etc/systemd/system/clamav-clamonacc.service index 6f33ca8113..0adf095db8 100644 --- a/deployment/common/resources/clamav-clamonacc.service +++ b/data_safe_haven/resources/workspace/ansible/files/etc/systemd/system/clamav-clamonacc.service @@ -12,4 +12,4 @@ Restart=on-failure RestartSec=30 [Install] -WantedBy=multi-user.target \ No newline at end of file +WantedBy=multi-user.target diff --git a/data_safe_haven/resources/workspace/ansible/files/etc/xdg/xfce4/terminal/terminalrc b/data_safe_haven/resources/workspace/ansible/files/etc/xdg/xfce4/terminal/terminalrc new file mode 100644 index 0000000000..ae52511092 --- /dev/null +++ b/data_safe_haven/resources/workspace/ansible/files/etc/xdg/xfce4/terminal/terminalrc @@ -0,0 +1,5 @@ +# Dark pastels colourscheme +ColorForeground=#dcdcdc +ColorBackground=#2c2c2c +ColorCursor=#dcdcdc +ColorPalette=#3f3f3f;#705050;#60b48a;#dfaf8f;#9ab8d7;#dc8cc3;#8cd0d3;#dcdcdc;#709080;#dca3a3;#72d5a3;#f0dfaf;#94bff3;#ec93d3;#93e0e3;#ffffff \ No newline at end of file diff --git 
a/data_safe_haven/resources/workspace/ansible/files/etc/xrdp/xrdp.ini b/data_safe_haven/resources/workspace/ansible/files/etc/xrdp/xrdp.ini new file mode 100644 index 0000000000..2e29d0545c --- /dev/null +++ b/data_safe_haven/resources/workspace/ansible/files/etc/xrdp/xrdp.ini @@ -0,0 +1,208 @@ +[Globals] +; xrdp.ini file version number +ini_version=1 + +; fork a new process for each incoming connection +fork=true + +; ports to listen on, number alone means listen on all interfaces +; 0.0.0.0 or :: if ipv6 is configured +; space between multiple occurrences +; ALL specified interfaces must be UP when xrdp starts, otherwise xrdp will fail to start +; +; Examples: +; port=3389 +; port=unix://./tmp/xrdp.socket +; port=tcp://.:3389 127.0.0.1:3389 +; port=tcp://:3389 *:3389 +; port=tcp://:3389 192.168.1.1:3389 +; port=tcp6://.:3389 ::1:3389 +; port=tcp6://:3389 *:3389 +; port=tcp6://{}:3389 {FC00:0:0:0:0:0:0:1}:3389 +; port=vsock://: +port=3389 + +; 'port' above should be connected to with vsock instead of tcp +; use this only with number alone in port above +; prefer use vsock://: above +use_vsock=false + +; regulate if the listening socket use socket option tcp_nodelay +; no buffering will be performed in the TCP stack +tcp_nodelay=true + +; regulate if the listening socket use socket option keepalive +; if the network connection disappear without close messages the connection will be closed +tcp_keepalive=true + +; set tcp send/recv buffer (for experts) +#tcp_send_buffer_bytes=32768 +#tcp_recv_buffer_bytes=32768 + +; security layer can be 'tls', 'rdp' or 'negotiate' +; for client compatible layer +security_layer=negotiate + +; minimum security level allowed for client for classic RDP encryption +; use tls_ciphers to configure TLS encryption +; can be 'none', 'low', 'medium', 'high', 'fips' +crypt_level=high + +; X.509 certificate and private key +; openssl req -x509 -newkey rsa:2048 -nodes -keyout key.pem -out cert.pem -days 365 +; note this needs the user xrdp to be a member of the ssl-cert group, do with e.g. +;$ sudo adduser xrdp ssl-cert +certificate= +key_file= + +; set SSL protocols +; can be comma separated list of 'SSLv3', 'TLSv1', 'TLSv1.1', 'TLSv1.2', 'TLSv1.3' +ssl_protocols=TLSv1.2, TLSv1.3 +; set TLS cipher suites +#tls_ciphers=HIGH + +; concats the domain name to the user if set for authentication with the separator +; for example when the server is multi homed with SSSd +#domain_user_separator=@ + +; The following options will override the keyboard layout settings. +; These options are for DEBUG and are not recommended for regular use. +#xrdp.override_keyboard_type=0x04 +#xrdp.override_keyboard_subtype=0x01 +#xrdp.override_keylayout=0x00000409 + +; Section name to use for automatic login if the client sends username +; and password. If empty, the domain name sent by the client is used. +; If empty and no domain name is given, the first suitable section in +; this file will be used. 
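; (For example, setting "autorun=Xorg" would always use the [Xorg] session
; section at the end of this file; it is deliberately left blank here.)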
+autorun= + +allow_channels=true +allow_multimon=true +bitmap_cache=true +bitmap_compression=true +bulk_compression=true +#hidelogwindow=true +max_bpp=32 +new_cursors=true +; fastpath - can be 'input', 'output', 'both', 'none' +use_fastpath=both +; when true, userid/password *must* be passed on cmd line +#require_credentials=true +; when true, the userid will be used to try to authenticate +#enable_token_login=true +; You can set the PAM error text in a gateway setup (MAX 256 chars) +#pamerrortxt=change your password according to policy at http://url + +; +; colors used by windows in RGB format +; +blue=ffffff # used for selected titlebar +grey=dedede # used for button faces +dark_grey=808080 # used for unselected titlebar and button edges + +; +; configure login screen +; + +; Login Screen Window Title +ls_title=. + +; top level window background color in RGB format +ls_top_window_bg_color=8a8a8a + +; width and height of login screen +; +; The default height allows for about 5 fields to be comfortably displayed +; above the buttons at the bottom. To display more fields, make +; larger, and also increase and +; below +; +ls_width=350 +ls_height=430 + +; login screen background color in RGB format +ls_bg_color=ffffff + +; optional background image filename (bmp format). +#ls_background_image= + +; logo +; full path to bmp-file or file in shared folder +ls_logo_filename=/usr/local/share/xrdp/dsh_logo_240x140_256color.bmp +ls_logo_x_pos=55 +ls_logo_y_pos=50 + +; for positioning labels such as username, password etc +ls_label_x_pos=30 +ls_label_width=65 + +; for positioning text and combo boxes next to above labels +ls_input_x_pos=110 +ls_input_width=210 + +; y pos for first label and combo box +ls_input_y_pos=220 + +; OK button +ls_btn_ok_x_pos=20 +ls_btn_ok_y_pos=350 +ls_btn_ok_width=150 +ls_btn_ok_height=50 + +; Cancel button +ls_btn_cancel_x_pos=180 +ls_btn_cancel_y_pos=350 +ls_btn_cancel_width=150 +ls_btn_cancel_height=50 + +[Logging] +; Note: Log levels can be any of: core, error, warning, info, debug, or trace +LogFile=xrdp.log +LogLevel=INFO +EnableSyslog=true +#SyslogLevel=INFO +#EnableConsole=false +#ConsoleLevel=INFO +#EnableProcessId=false + +[LoggingPerLogger] +; Note: per logger configuration is only used if xrdp is built with +; --enable-devel-logging +#xrdp.c=INFO +#main()=INFO + +[Channels] +; Channel names not listed here will be blocked by XRDP. +; You can block any channel by setting its value to false. +; IMPORTANT! All channels are not supported in all use +; cases even if you set all values to true. +; You can override these settings on each session type +; These settings are only used if allow_channels=true +rdpdr=true +rdpsnd=true +drdynvc=true +cliprdr=true +rail=true +xrdpvr=true +tcutils=true + +; for debugging xrdp, in section xrdp1, change port=-1 to this: +#port=/tmp/.xrdp/xrdp_display_10 + + +; +; Session types +; + +; Some session types such as Xorg, X11rdp and Xvnc start a display server. +; Startup command-line parameters for the display server are configured +; in sesman.ini. See and configure also sesman.ini. +[Xorg] +name=Xorg +lib=libxup.so +username=ask +password=ask +ip=127.0.0.1 +port=-1 +code=20 diff --git a/data_safe_haven/resources/workspace/ansible/files/usr/local/bin/privileged-rules b/data_safe_haven/resources/workspace/ansible/files/usr/local/bin/privileged-rules new file mode 100644 index 0000000000..8b7703069b --- /dev/null +++ b/data_safe_haven/resources/workspace/ansible/files/usr/local/bin/privileged-rules @@ -0,0 +1,16 @@ +#! 
/usr/bin/env bash + +# Find executables with suid and sgid and write auditd rules to monitor their usage +# CIS Ubuntu 22.04 LTS benchmark recommendation 6.3.3.6 +UID_MIN=$(awk '/^\s*UID_MIN/{print $2}' /etc/login.defs) +AUDIT_RULE_FILE="/etc/audit/rules.d/50-privileged.rules" +NEW_DATA=() +for PARTITION in $(findmnt -n -l -k -it $(awk '/nodev/ { print $2 }' /proc/filesystems | paste -sd,) | grep -Pv "noexec|nosuid" | awk '{print $1}'); do + readarray -t DATA < <(find "${PARTITION}" -xdev -perm /6000 -type f | awk -v UID_MIN=${UID_MIN} '{print "-a always,exit -F path=" $1 " -F perm=x -F auid>="UID_MIN" -F auid!=unset -k privileged" }') + for ENTRY in "${DATA[@]}"; do + NEW_DATA+=("${ENTRY}") + done +done +readarray &> /dev/null -t OLD_DATA < "${AUDIT_RULE_FILE}" +COMBINED_DATA=( "${OLD_DATA[@]}" "${NEW_DATA[@]}" ) +printf '%s\n' "${COMBINED_DATA[@]}" | sort -u diff --git a/data_safe_haven/resources/workspace/ansible/files/usr/local/share/xrdp/dsh_logo_240x140_256color.bmp b/data_safe_haven/resources/workspace/ansible/files/usr/local/share/xrdp/dsh_logo_240x140_256color.bmp new file mode 100644 index 0000000000..065de8eefa Binary files /dev/null and b/data_safe_haven/resources/workspace/ansible/files/usr/local/share/xrdp/dsh_logo_240x140_256color.bmp differ diff --git a/data_safe_haven/resources/workspace/ansible/files/usr/local/smoke_tests/run_all_tests.bats b/data_safe_haven/resources/workspace/ansible/files/usr/local/smoke_tests/run_all_tests.bats new file mode 100644 index 0000000000..c2e9550a71 --- /dev/null +++ b/data_safe_haven/resources/workspace/ansible/files/usr/local/smoke_tests/run_all_tests.bats @@ -0,0 +1,128 @@ +#! /usr/bin/env bats + + +# Helper functions +# ---------------- +initialise_python_environment() { + ENV_PATH="${HOME}/.local/bats-python-environment" + rm -rf "$ENV_PATH" + python -m venv "$ENV_PATH" + source "${ENV_PATH}/bin/activate" + pip install --upgrade pip --quiet +} + +initialise_r_environment() { + ENV_PATH="${HOME}/.local/bats-r-environment" + rm -rf "$ENV_PATH" + mkdir -p "$ENV_PATH" +} + +install_r_package() { + PACKAGE_NAME="$1" + ENV_PATH="${HOME}/.local/bats-r-environment" + Rscript -e "install.packages('$PACKAGE_NAME', lib='$ENV_PATH');" +} + +install_r_package_version() { + PACKAGE_NAME="$1" + PACKAGE_VERSION="$2" + ENV_PATH="${HOME}/.local/bats-r-environment" + Rscript -e "install.packages('remotes', lib='$ENV_PATH');" + Rscript -e "library('remotes', lib='$ENV_PATH'); remotes::install_version(package='$PACKAGE_NAME', version='$PACKAGE_VERSION', lib='$ENV_PATH');" +} + +check_db_credentials() { + db_credentials="/etc/database_credential" + if [ -f "$db_credentials" ]; then + return 0 + fi + return 1 +} + + +# Mounted drives +# -------------- +@test "Mounted drives (/data)" { + run bash test_mounted_drives.sh -d data + [ "$status" -eq 0 ] +} +@test "Mounted drives (/home)" { + run bash test_mounted_drives.sh -d home + [ "$status" -eq 0 ] +} +@test "Mounted drives (/output)" { + run bash test_mounted_drives.sh -d output + [ "$status" -eq 0 ] +} +@test "Mounted drives (/shared)" { + run bash test_mounted_drives.sh -d shared + [ "$status" -eq 0 ] +} + + +# Package repositories +# -------------------- +@test "Python package repository" { + initialise_python_environment + run bash test_repository_python.sh 2>&1 + [ "$status" -eq 0 ] +} +@test "R package repository" { + initialise_r_environment + run bash test_repository_R.sh + [ "$status" -eq 0 ] +} + + +# Language functionality +# ---------------------- +@test "Python functionality" { + 
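    # Creates a throwaway venv, installs the test dependencies from the
    # configured (allowlisted) package index, then runs
    # test_functionality_python.py, a small logistic-regression check.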
initialise_python_environment + pip install numpy pandas scikit-learn --quiet + run python test_functionality_python.py 2>&1 + [ "$status" -eq 0 ] +} +@test "R functionality" { + initialise_r_environment + install_r_package_version "MASS" "7.3-52" + run Rscript test_functionality_R.R + [ "$status" -eq 0 ] +} + + +# Databases +# --------- +# Test MS SQL database +@test "MS SQL database (Python)" { + check_db_credentials || skip "No database credentials available" + initialise_python_environment + pip install pandas psycopg pymssql --quiet + run bash test_databases.sh -d mssql -l python + [ "$status" -eq 0 ] +} +@test "MS SQL database (R)" { + check_db_credentials || skip "No database credentials available" + initialise_r_environment + install_r_package "DBI" + install_r_package "odbc" + install_r_package "RPostgres" + run bash test_databases.sh -d mssql -l R + [ "$status" -eq 0 ] +} +# Test Postgres database +@test "Postgres database (Python)" { + check_db_credentials || skip "No database credentials available" + initialise_python_environment + pip install pandas psycopg pymssql --quiet + run bash test_databases.sh -d postgresql -l python + [ "$status" -eq 0 ] +} +@test "Postgres database (R)" { + check_db_credentials || skip "No database credentials available" + initialise_r_environment + install_r_package "DBI" + install_r_package "odbc" + install_r_package "RPostgres" + run bash test_databases.sh -d postgresql -l R + [ "$status" -eq 0 ] +} diff --git a/data_safe_haven/resources/workspace/ansible/files/usr/local/smoke_tests/test_databases.sh b/data_safe_haven/resources/workspace/ansible/files/usr/local/smoke_tests/test_databases.sh new file mode 100644 index 0000000000..c09ff85602 --- /dev/null +++ b/data_safe_haven/resources/workspace/ansible/files/usr/local/smoke_tests/test_databases.sh @@ -0,0 +1,51 @@ +#! /bin/bash +db_type="" +language="" +while getopts d:l: flag; do + case "${flag}" in + d) db_type=${OPTARG} ;; + l) language=${OPTARG} ;; + *) + echo "Invalid option ${OPTARG}" + exit 1 + ;; + esac +done + +db_credentials="/etc/database_credential" +if [ -f "$db_credentials" ]; then + username="databaseadmin" + password="$(cat "$db_credentials")" +else + echo "Credentials file ($db_credentials) not found." + exit 1 +fi + +sre_fqdn="$(grep trusted /etc/pip.conf | cut -d "." 
-f 2-99)" +sre_prefix="$(hostname | cut -d "-" -f 1-4)" +if [ "$db_type" == "mssql" ]; then + db_name="master" + port="1433" + server_name="mssql.${sre_fqdn}" + hostname="${sre_prefix}-db-server-mssql" +elif [ "$db_type" == "postgresql" ]; then + db_name="postgres" + port="5432" + server_name="postgresql.${sre_fqdn}" + hostname="${sre_prefix}-db-server-postgresql" +else + echo "Did not recognise database type '$db_type'" + exit 1 +fi + +if [ "$port" == "" ]; then + echo "Database type '$db_type' is not part of this SRE" + exit 1 +else + script_path=$(dirname "$(readlink -f "$0")") + if [ "$language" == "python" ]; then + python "${script_path}"/test_databases_python.py --db-type "$db_type" --db-name "$db_name" --port "$port" --server-name "$server_name" --hostname "$hostname" --username "$username" --password "$password" || exit 1 + elif [ "$language" == "R" ]; then + Rscript "${script_path}"/test_databases_R.R "$db_type" "$db_name" "$port" "$server_name" "$hostname" "$username" "$password" || exit 1 + fi +fi diff --git a/tests/srd_smoke_tests/test_databases_R.R b/data_safe_haven/resources/workspace/ansible/files/usr/local/smoke_tests/test_databases_R.R similarity index 52% rename from tests/srd_smoke_tests/test_databases_R.R rename to data_safe_haven/resources/workspace/ansible/files/usr/local/smoke_tests/test_databases_R.R index 70c1f4a889..efade84bcf 100644 --- a/tests/srd_smoke_tests/test_databases_R.R +++ b/data_safe_haven/resources/workspace/ansible/files/usr/local/smoke_tests/test_databases_R.R @@ -1,16 +1,20 @@ #!/usr/bin/env Rscript -library(DBI) -library(odbc) +library(DBI, lib.loc='~/.local/bats-r-environment') +library(odbc, lib.loc='~/.local/bats-r-environment') +library(RPostgres, lib.loc='~/.local/bats-r-environment') # Parse command line arguments args = commandArgs(trailingOnly=TRUE) -if (length(args)!=4) { - stop("Exactly four arguments are required: db_type, db_name, port and server_name") +if (length(args)!=7) { + stop("Exactly seven arguments are required: db_type, db_name, port, server_name, hostname, username and password") } db_type = args[1] db_name = args[2] port = args[3] server_name = args[4] +hostname = args[5] +username = args[6] +password = args[7] # Connect to the database print(paste("Attempting to connect to '", db_name, "' on '", server_name, "' via port '", port, sep="")) @@ -20,15 +24,21 @@ if (db_type == "mssql") { Driver = "ODBC Driver 17 for SQL Server", Server = paste(server_name, port, sep=","), Database = db_name, - Trusted_Connection = "yes" + # Trusted_Connection = "yes", + UID = paste(username, "@", hostname, sep=""), + PWD = password ) -} else if (db_type == "postgres") { +} else if (db_type == "postgresql") { cnxn <- DBI::dbConnect( RPostgres::Postgres(), host = server_name, port = port, - dbname = db_name + dbname = db_name, + user = username, + password = password ) +} else { + stop(paste("Database type '", db_type, "' was not recognised", sep="")) } # Run a query and save the output into a dataframe @@ -36,4 +46,6 @@ df <- dbGetQuery(cnxn, "SELECT * FROM information_schema.tables;") if (dim(df)[1] > 0) { print(head(df, 5)) print("All database tests passed") +} else { + stop(paste("Reading from database '", db_name, "' failed", sep="")) } diff --git a/data_safe_haven/resources/workspace/ansible/files/usr/local/smoke_tests/test_databases_python.py b/data_safe_haven/resources/workspace/ansible/files/usr/local/smoke_tests/test_databases_python.py new file mode 100644 index 0000000000..c3401bae29 --- /dev/null +++ 
b/data_safe_haven/resources/workspace/ansible/files/usr/local/smoke_tests/test_databases_python.py @@ -0,0 +1,66 @@ +#! /usr/bin/env python +import argparse + +import pandas as pd +import psycopg +import pymssql + + +def test_database( + server_name: str, + hostname: str, + port: int, + db_type: str, + db_name: str, + username: str, + password: str, +) -> None: + msg = f"Attempting to connect to '{db_name}' on '{server_name}' via port {port}" + print(msg) # noqa: T201 + username_full = f"{username}@{hostname}" + cnxn = None + if db_type == "mssql": + cnxn = pymssql.connect( + server=server_name, user=username_full, password=password, database=db_name + ) + elif db_type == "postgresql": + connection_string = f"host={server_name} port={port} dbname={db_name} user={username} password={password}" + cnxn = psycopg.connect(connection_string) + else: + msg = f"Database type '{db_type}' was not recognised" + raise ValueError(msg) + df = pd.read_sql("SELECT * FROM information_schema.tables;", cnxn) + if df.size: + print(df.head(5)) # noqa: T201 + print("All database tests passed") # noqa: T201 + else: + msg = f"Reading from database '{db_name}' failed." + raise ValueError(msg) + + +# Parse command line arguments +parser = argparse.ArgumentParser() +parser.add_argument( + "--db-type", + type=str, + choices=["mssql", "postgresql"], + help="Which database type to use", +) +parser.add_argument("--db-name", type=str, help="Which database to connect to") +parser.add_argument("--port", type=str, help="Which port to connect to") +parser.add_argument("--server-name", type=str, help="Which server to connect to") +parser.add_argument("--username", type=str, help="Database username") +parser.add_argument("--hostname", type=str, help="Azure hostname of the server") +parser.add_argument("--password", type=str, help="Database user password") +args = parser.parse_args() + +# Run database test +test_database( + args.server_name, + args.hostname, + args.port, + args.db_type, + args.db_name, + args.username, + args.password, +) diff --git a/tests/srd_smoke_tests/test_functionality_R.R b/data_safe_haven/resources/workspace/ansible/files/usr/local/smoke_tests/test_functionality_R.R similarity index 95% rename from tests/srd_smoke_tests/test_functionality_R.R rename to data_safe_haven/resources/workspace/ansible/files/usr/local/smoke_tests/test_functionality_R.R index 025c8a1b8b..94c351e7c3 100644 --- a/tests/srd_smoke_tests/test_functionality_R.R +++ b/data_safe_haven/resources/workspace/ansible/files/usr/local/smoke_tests/test_functionality_R.R @@ -1,5 +1,5 @@ # Test logistic regression using R -library('MASS') +library('MASS', lib.loc='~/.local/bats-r-environment') library('stats') gen_data <- function(n = 100, p = 3) { @@ -36,5 +36,4 @@ data <- rnorm(n = N, mean = data_means, sd = 0.5) hc <- hclust(dist(data)) print("Clustering ran OK") - print("All functionality tests passed") diff --git a/tests/srd_smoke_tests/test_functionality_python.py b/data_safe_haven/resources/workspace/ansible/files/usr/local/smoke_tests/test_functionality_python.py similarity index 80% rename from tests/srd_smoke_tests/test_functionality_python.py rename to data_safe_haven/resources/workspace/ansible/files/usr/local/smoke_tests/test_functionality_python.py index 9ca9662d98..449e76be9d 100644 --- a/tests/srd_smoke_tests/test_functionality_python.py +++ b/data_safe_haven/resources/workspace/ansible/files/usr/local/smoke_tests/test_functionality_python.py @@ -1,10 +1,11 @@ """Test logistic regression using python""" + import numpy as np 
import pandas as pd from sklearn.linear_model import LogisticRegression -def gen_data(n_samples, n_points): +def gen_data(n_samples: int, n_points: int) -> pd.DataFrame: """Generate data for fitting""" target = np.random.binomial(n=1, p=0.5, size=(n_samples, 1)) theta = np.random.normal(loc=0.0, scale=1.0, size=(1, n_points)) @@ -12,13 +13,13 @@ def gen_data(n_samples, n_points): values = np.random.multivariate_normal( means, np.diag([1] * n_points), size=n_samples ).T - data = dict(("x{}".format(n), values[n]) for n in range(n_points)) + data = {f"x{n}": values[n] for n in range(n_points)} data["y"] = target.reshape((n_samples,)) data["weights"] = np.random.gamma(shape=1, scale=1.0, size=n_samples) return pd.DataFrame(data=data) -def main(): +def main() -> None: """Logistic regression""" data = gen_data(100, 3) input_data = data.iloc[:, :-2] @@ -29,8 +30,8 @@ def main(): logit.fit(input_data, output_data, sample_weight=weights) logit.score(input_data, output_data, sample_weight=weights) - print("Logistic model ran OK") - print("All functionality tests passed") + print("Logistic model ran OK") # noqa: T201 + print("All functionality tests passed") # noqa: T201 if __name__ == "__main__": diff --git a/data_safe_haven/resources/workspace/ansible/files/usr/local/smoke_tests/test_mounted_drives.sh b/data_safe_haven/resources/workspace/ansible/files/usr/local/smoke_tests/test_mounted_drives.sh new file mode 100644 index 0000000000..a1812934b9 --- /dev/null +++ b/data_safe_haven/resources/workspace/ansible/files/usr/local/smoke_tests/test_mounted_drives.sh @@ -0,0 +1,66 @@ +#! /bin/bash +while getopts d: flag +do + case "${flag}" in + d) directory=${OPTARG};; + *) + echo "Usage: $0 -d [directory]" + exit 1 + esac +done + +nfailed=0 +if [[ "$directory" = "home" ]]; then directory_path=$(echo ~); else directory_path="/${directory}"; fi +testfile="$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 32 | head -n 1)" + +# Check that directory exists +if [ "$(ls "${directory_path}" 2>&1 1>/dev/null)" ]; then + echo "Could not find mount '${directory_path}'" + nfailed=$((nfailed + 1)) +fi + +# Test operations +CAN_CREATE="$([[ "$(touch "${directory_path}/${testfile}" 2>&1 1>/dev/null)" = "" ]] && echo '1' || echo '0')" +CAN_WRITE="$([[ -w "${directory_path}/${testfile}" ]] && echo '1' || echo '0')" +CAN_DELETE="$([[ "$(touch "${directory_path}/${testfile}" 2>&1 1>/dev/null && rm "${directory_path}/${testfile}" 2>&1)" ]] && echo '0' || echo '1')" + +# Check that permissions are as expected for each directory +case "$directory" in + data) + if [ "$CAN_CREATE" = 1 ]; then echo "Able to create files in ${directory_path}!"; nfailed=$((nfailed + 1)); fi + if [ "$CAN_WRITE" = 1 ]; then echo "Able to write files in ${directory_path}!"; nfailed=$((nfailed + 1)); fi + if [ "$CAN_DELETE" = 1 ]; then echo "Able to delete files in ${directory_path}!"; nfailed=$((nfailed + 1)); fi + ;; + + home) + if [ "$CAN_CREATE" = 0 ]; then echo "Unable to create files in ${directory_path}!"; nfailed=$((nfailed + 1)); fi + if [ "$CAN_WRITE" = 0 ]; then echo "Unable to write files in ${directory_path}!"; nfailed=$((nfailed + 1)); fi + if [ "$CAN_DELETE" = 0 ]; then echo "Unable to delete files in ${directory_path}!"; nfailed=$((nfailed + 1)); fi + ;; + + output) + if [ "$CAN_CREATE" = 0 ]; then echo "Unable to create files in ${directory_path}!"; nfailed=$((nfailed + 1)); fi + if [ "$CAN_WRITE" = 0 ]; then echo "Unable to write files in ${directory_path}!"; nfailed=$((nfailed + 1)); fi + if [ "$CAN_DELETE" = 0 ]; then echo "Unable to 
delete files in ${directory_path}!"; nfailed=$((nfailed + 1)); fi + ;; + + shared) + if [ "$CAN_CREATE" = 0 ]; then echo "Unable to create files in ${directory_path}!"; nfailed=$((nfailed + 1)); fi + if [ "$CAN_WRITE" = 0 ]; then echo "Unable to write files in ${directory_path}!"; nfailed=$((nfailed + 1)); fi + if [ "$CAN_DELETE" = 0 ]; then echo "Unable to delete files in ${directory_path}!"; nfailed=$((nfailed + 1)); fi + ;; + + *) + echo "Usage: $0 -d [directory]" + exit 1 +esac + +# Cleanup and print output +rm -f "${directory_path}/${testfile}" 2> /dev/null +if [ $nfailed = 0 ]; then + echo "All tests passed for '${directory_path}'" + exit 0 +else + echo "$nfailed tests failed for '${directory_path}'!" + exit $nfailed +fi diff --git a/tests/srd_smoke_tests/test_repository_R.mustache.sh b/data_safe_haven/resources/workspace/ansible/files/usr/local/smoke_tests/test_repository_R.sh similarity index 81% rename from tests/srd_smoke_tests/test_repository_R.mustache.sh rename to data_safe_haven/resources/workspace/ansible/files/usr/local/smoke_tests/test_repository_R.sh index 49636d6276..ed0c1aee25 100644 --- a/tests/srd_smoke_tests/test_repository_R.mustache.sh +++ b/data_safe_haven/resources/workspace/ansible/files/usr/local/smoke_tests/test_repository_R.sh @@ -7,12 +7,13 @@ packages=("askpass" "zeallot") uninstallable_packages=("aws.s3") # Create a temporary library directory -TEST_INSTALL_PATH="${HOME}/test-repository-R" -rm -rf "$TEST_INSTALL_PATH" -mkdir -p "$TEST_INSTALL_PATH" +TEST_INSTALL_PATH="${HOME}/.local/bats-r-environment" +# TEST_INSTALL_PATH="${HOME}/test-repository-R" +# rm -rf "$TEST_INSTALL_PATH" +# mkdir -p "$TEST_INSTALL_PATH" # Install sample packages to local user library -OUTCOME=0 +N_FAILURES=0 for package in "${packages[@]}"; do echo "Attempting to install ${package}..." Rscript -e "options(warn=-1); install.packages('${package}', lib='${TEST_INSTALL_PATH}', quiet=TRUE)" @@ -20,18 +21,18 @@ for package in "${packages[@]}"; do echo "... $package installation succeeded" else echo "... $package installation failed" - OUTCOME=1 + N_FAILURES=$((N_FAILURES + 1)) fi done # If requested, demonstrate that installation fails for packages *not* on the approved list -TEST_FAILURE={{SmokeTests.TestFailures}} +TEST_FAILURE=0 if [ $TEST_FAILURE -eq 1 ]; then for package in "${uninstallable_packages[@]}"; do echo "Attempting to install ${package}..." Rscript -e "options(warn=-1); install.packages('${package}', lib='${TEST_INSTALL_PATH}', quiet=TRUE)" if (Rscript -e "library('${package}', lib.loc='${TEST_INSTALL_PATH}')"); then echo "... $package installation unexpectedly succeeded!" - OUTCOME=1 + N_FAILURES=$((N_FAILURES + 1)) else echo "... $package installation failed as expected" fi @@ -39,8 +40,10 @@ if [ $TEST_FAILURE -eq 1 ]; then fi rm -rf "$TEST_INSTALL_PATH" -if [ $OUTCOME -eq 0 ]; then +if [ $N_FAILURES -eq 0 ]; then echo "All package installations behaved as expected" + exit 0 else echo "One or more package installations did not behave as expected!" 
+ exit $N_FAILURES fi diff --git a/tests/srd_smoke_tests/test_repository_python.mustache.sh b/data_safe_haven/resources/workspace/ansible/files/usr/local/smoke_tests/test_repository_python.sh similarity index 68% rename from tests/srd_smoke_tests/test_repository_python.mustache.sh rename to data_safe_haven/resources/workspace/ansible/files/usr/local/smoke_tests/test_repository_python.sh index 078dcff167..311c352f26 100644 --- a/tests/srd_smoke_tests/test_repository_python.mustache.sh +++ b/data_safe_haven/resources/workspace/ansible/files/usr/local/smoke_tests/test_repository_python.sh @@ -1,39 +1,31 @@ #! /bin/bash -# shellcheck disable=SC1091 # We need to test packages that are: # - *not* pre-installed -# - on the tier-3 list (so we can test all tiers) +# - on the allowlist (so we can test this is working) # - alphabetically early and late (so we can test the progress of the mirror synchronisation) -installable_packages=("{{SmokeTests.PyPIPackageFirst}}" "{{SmokeTests.PyPIPackageLast}}") +installable_packages=("contourpy" "tzdata") uninstallable_packages=("awscli") -# Set up a virtual environment for testing -TEST_INSTALL_PATH="${HOME}/test-repository-python" -rm -rf "$TEST_INSTALL_PATH" -python -m venv "$TEST_INSTALL_PATH" -source "${TEST_INSTALL_PATH}/bin/activate" -pip install --upgrade pip --quiet - # Install sample packages to local user library -OUTCOME=0 +N_FAILURES=0 for package in "${installable_packages[@]}"; do echo "Attempting to install ${package}..." if (pip install "$package" --quiet); then echo "... $package installation succeeded" else echo "... $package installation failed" - OUTCOME=1 + N_FAILURES=$((N_FAILURES + 1)) fi done # If requested, demonstrate that installation fails for packages *not* on the approved list -TEST_FAILURE={{SmokeTests.TestFailures}} +TEST_FAILURE=0 if [ $TEST_FAILURE -eq 1 ]; then for package in "${uninstallable_packages[@]}"; do echo "Attempting to install ${package}..." if (pip install "$package" --quiet); then echo "... $package installation unexpectedly succeeded!" - OUTCOME=1 + N_FAILURES=$((N_FAILURES + 1)) else echo "... $package installation failed as expected" fi @@ -41,8 +33,10 @@ if [ $TEST_FAILURE -eq 1 ]; then fi rm -rf "$TEST_INSTALL_PATH" -if [ $OUTCOME -eq 0 ]; then +if [ $N_FAILURES -eq 0 ]; then echo "All package installations behaved as expected" + exit 0 else echo "One or more package installations did not behave as expected!" 
+ exit $N_FAILURES fi diff --git a/data_safe_haven/resources/workspace/ansible/host_vars/localhost.yaml b/data_safe_haven/resources/workspace/ansible/host_vars/localhost.yaml new file mode 100644 index 0000000000..1baab38e7f --- /dev/null +++ b/data_safe_haven/resources/workspace/ansible/host_vars/localhost.yaml @@ -0,0 +1,153 @@ +--- + +# Packages by category +# Each category has 'common' packages for when the package name is identical +# for all releases +# Other packages are listed in dicts with the name of the release as given by +# `ansible_facts['distribution_release']` +package_categories: + - category: admin + common: + - auditd + - bats + - clamav + - clamav-daemon + - clamav-unofficial-sigs + - ubuntu-drivers-common + jammy: [] + noble: [] + - category: browsers + common: + - firefox + jammy: [] + noble: [] + - category: database + common: + - libpq-dev # interact with PostgreSQL databases + - msodbcsql17 # interact with Microsoft SQL databases + - postgresql-client # CLI psql client + - unixodbc-dev # interact with Microsoft SQL databases + jammy: [] + noble: [] + - category: development + common: + - build-essential # Common build tools (GCC, make, etc.) + - cabal-install # Haskell Cabal packaging tools + - cargo # Rust development and packaging tool + - clojure # Clojure functional programming language for JVM + - cmake # CMake build system + - default-jre # Java runtime environment + - dotnet-runtime-8.0 # .Net runtime + - dotnet-sdk-8.0 # .Net SDK + - fsharp # F# functional-first programming language + - g++ # GNU C++ compiler + - gcc # GNU C compiler + - gfortran # GNU Fortran compiler + - ghc # Glasgow Haskell Compiler + - git-all # Git including all subpackages + - golang # Go programming language + - guile-3.0 # GNU Scheme functional programming language + - ninja-build # Ninja build system + - octave # Open source Matlab implementation + - python-is-python3 # symlinks python to python3 + - python3 # System Python3 distribution + - python3-venv # Python3 venv module + - r-base # R programming language + - racket # Racket functional programming language implementation + - racket-common # Racket shared files + - ruby-full # Complete Ruby distribution + - rustc # Rust compiler + - scala # Scala multi-paradigm programming language for JVM + - scala-library # Scala standard library + - weka # Data mining tools + jammy: + - python3.10 + - python3.10-venv + noble: + - python3.12 + - python3.12-venv + - category: editors + common: + - emacs + - nano + - neovim + - vim + - vim-airline + - vim-airline-themes + - vim-fugitive + - vim-gtk3 + - vim-julia + - vim-latexsuite + - vim-syntastic + jammy: [] + noble: [] + - category: office + common: + - libreoffice + - texlive-full + jammy: [] + noble: [] + - category: remote_desktop + common: + - libnss-ldapd # LDAP login + - libpam-ldapd # LDAP login + - ldap-utils # LDAP login + - nfs-common # Mounting shared volumes + - xfce4 # XFCE desktop + - xrdp # remote desktop client + jammy: [] + noble: [] + - category: shells + common: + - bash + - fish + - zsh + - zsh-syntax-highlighting + - zsh-autosuggestions + jammy: [] + noble: [] + - category: utilities + common: + - bat + - fd-find + - fzf + - htop + - httpie + - nvtop + - ripgrep + - rsync + - tmux + - wget + - xfce4-terminal + jammy: [] + noble: + - eza + - nvtop + +# Collect packages for releases +# It is important that the dict names for releases match `ansible_facts['distribution_relesase']` +apt_packages: + common: "{{ package_categories | map(attribute='common') | flatten }}" + 
jammy: "{{ package_categories | map(attribute='jammy') | flatten }}" + noble: "{{ package_categories | map(attribute='noble') | flatten }}" + +# Note that RStudio does not currently have a separate release for noble +deb_packages: + jammy: + - source: https://download1.rstudio.org/electron/jammy/amd64 + filename: rstudio-2024.04.2-764-amd64.deb + sha256: 1d0bd2f54215f514a8a78a4d035c7804218bb8fafa417aa5083d341e174e6452 + creates: /usr/bin/rstudio + noble: + - source: https://download1.rstudio.org/electron/jammy/amd64 + filename: rstudio-2024.04.2-764-amd64.deb + sha256: 1d0bd2f54215f514a8a78a4d035c7804218bb8fafa417aa5083d341e174e6452 + creates: /usr/bin/rstudio + +snap_packages: + - name: codium + classic: true + - name: dbeaver-ce + classic: false + - name: pycharm-community + classic: true diff --git a/data_safe_haven/resources/workspace/ansible/install_deb.sh b/data_safe_haven/resources/workspace/ansible/install_deb.sh new file mode 100644 index 0000000000..c3d4fb9919 --- /dev/null +++ b/data_safe_haven/resources/workspace/ansible/install_deb.sh @@ -0,0 +1,33 @@ +#! /bin/bash + +# Require three arguments: remote name, debfile name and sha256 hash +if [ $# -ne 3 ]; then + echo "FATAL: Incorrect number of arguments" + exit 1 +fi +PACKAGE_REMOTE=$1 +PACKAGE_DEBFILE=$2 +PACKAGE_HASH=$3 + +# Download and verify the .deb file +echo "Downloading and verifying deb file ${PACKAGE_DEBFILE}" +mkdir -p /tmp/build/ +wget -nv "${PACKAGE_REMOTE}/${PACKAGE_DEBFILE}" -P /tmp/build/ +ls -alh "/tmp/build/${PACKAGE_DEBFILE}" +echo "$PACKAGE_HASH /tmp/build/${PACKAGE_DEBFILE}" > "/tmp/${PACKAGE_DEBFILE}_sha256.hash" +if [ "$(sha256sum -c "/tmp/${PACKAGE_DEBFILE}_sha256.hash" | grep FAILED)" != "" ]; then + echo "FATAL: Checksum did not match expected for $PACKAGE_DEBFILE" + exit 1 +fi + +# Wait until the package repository is not in use +while ! apt-get check >/dev/null 2>&1; do + echo "Waiting for another installation process to finish..." + sleep 1 +done + +# Install and cleanup +echo "Installing deb file: ${PACKAGE_DEBFILE}" +apt install -y "/tmp/build/${PACKAGE_DEBFILE}" +echo "Cleaning up" +rm "/tmp/build/${PACKAGE_DEBFILE}" diff --git a/data_safe_haven/resources/workspace/ansible/templates/etc/audit/rules.d/audit.rules.j2 b/data_safe_haven/resources/workspace/ansible/templates/etc/audit/rules.d/audit.rules.j2 new file mode 100644 index 0000000000..a1011294b8 --- /dev/null +++ b/data_safe_haven/resources/workspace/ansible/templates/etc/audit/rules.d/audit.rules.j2 @@ -0,0 +1,115 @@ +# This file contains the auditctl rules that are loaded +# whenever the audit daemon is started via the initscripts. +# The rules are simply the parameters that would be passed +# to auditctl. + +# Section numbers refer to the CIS Ubuntu 22.04 LTS benchmark + +# First rule - delete all +-D + +# Increase the buffers to survive stress events. 
+# Make this bigger for busy systems
+-b 8192
+
+# This determines how long to wait in bursts of events
+--backlog_wait_time 0
+
+# Set failure mode to syslog
+-f 1
+
+# Record all sockets
+-a always,exit -F arch=b64 -F a0=2 -S socket -k dsh-socket
+
+# Record all connections
+-a always,exit -F arch=b64 -F a0=2 -S connect -k dsh-connect
+
+# 6.3.3.1 Ensure changes to system administration scope (sudoers) is collected
+-w /etc/sudoers -p wa -k scope
+-w /etc/sudoers.d -p wa -k scope
+
+# 6.3.3.2 Ensure actions as another user are always logged
+-a always,exit -F arch=b64 -C euid!=uid -F auid!=unset -S execve -k user_emulation
+-a always,exit -F arch=b32 -C euid!=uid -F auid!=unset -S execve -k user_emulation
+
+# 6.3.3.3 Ensure events that modify the sudo log file are collected
+-w /var/log/sudo.log -p wa -k sudo_log_file
+
+# 6.3.3.4 Ensure events that modify date and time information are collected
+-a always,exit -F arch=b64 -S adjtimex,settimeofday,clock_settime -k time-change
+-a always,exit -F arch=b32 -S adjtimex,settimeofday,clock_settime -k time-change
+-w /etc/localtime -p wa -k time-change
+
+# 6.3.3.5 Ensure events that modify the system's network environment are collected
+-a always,exit -F arch=b64 -S sethostname,setdomainname -k system-locale
+-a always,exit -F arch=b32 -S sethostname,setdomainname -k system-locale
+-w /etc/issue -p wa -k system-locale
+-w /etc/issue.net -p wa -k system-locale
+-w /etc/hosts -p wa -k system-locale
+-w /etc/networks -p wa -k system-locale
+-w /etc/network/ -p wa -k system-locale
+-w /etc/netplan/ -p wa -k system-locale
+
+# 6.3.3.7 Ensure unsuccessful file access attempts are collected
+-a always,exit -F arch=b64 -S creat,open,openat,truncate,ftruncate -F exit=-EACCES -F auid>={{ uid_min.stdout }} -F auid!=unset -k access
+-a always,exit -F arch=b64 -S creat,open,openat,truncate,ftruncate -F exit=-EPERM -F auid>={{ uid_min.stdout }} -F auid!=unset -k access
+-a always,exit -F arch=b32 -S creat,open,openat,truncate,ftruncate -F exit=-EACCES -F auid>={{ uid_min.stdout }} -F auid!=unset -k access
+-a always,exit -F arch=b32 -S creat,open,openat,truncate,ftruncate -F exit=-EPERM -F auid>={{ uid_min.stdout }} -F auid!=unset -k access
+
+# 6.3.3.8 Ensure events that modify user/group information are collected
+-w /etc/group -p wa -k identity
+-w /etc/passwd -p wa -k identity
+-w /etc/gshadow -p wa -k identity
+-w /etc/shadow -p wa -k identity
+-w /etc/security/opasswd -p wa -k identity
+-w /etc/nsswitch.conf -p wa -k identity
+-w /etc/pam.conf -p wa -k identity
+-w /etc/pam.d -p wa -k identity
+
+# 6.3.3.9 Ensure discretionary access control permission modification events are collected
+-a always,exit -F arch=b64 -S chmod,fchmod,fchmodat -F auid>={{ uid_min.stdout }} -F auid!=unset -F key=perm_mod
+-a always,exit -F arch=b64 -S chown,fchown,lchown,fchownat -F auid>={{ uid_min.stdout }} -F auid!=unset -F key=perm_mod
+-a always,exit -F arch=b32 -S chmod,fchmod,fchmodat -F auid>={{ uid_min.stdout }} -F auid!=unset -F key=perm_mod
+-a always,exit -F arch=b32 -S lchown,fchown,chown,fchownat -F auid>={{ uid_min.stdout }} -F auid!=unset -F key=perm_mod
+-a always,exit -F arch=b64 -S setxattr,lsetxattr,fsetxattr,removexattr,lremovexattr,fremovexattr -F auid>={{ uid_min.stdout }} -F auid!=unset -F key=perm_mod
+-a always,exit -F arch=b32 -S setxattr,lsetxattr,fsetxattr,removexattr,lremovexattr,fremovexattr -F auid>={{ uid_min.stdout }} -F auid!=unset -F key=perm_mod
+
+# 6.3.3.10 Ensure successful file system mounts are collected
+-a always,exit -F
arch=b32 -S mount -F auid>={{ uid_min.stdout }} -F auid!=unset -k mounts +-a always,exit -F arch=b64 -S mount -F auid>={{ uid_min.stdout }} -F auid!=unset -k mounts + +# 6.3.3.11 Ensure session initiation information is collected +-w /var/run/utmp -p wa -k session +-w /var/log/wtmp -p wa -k session +-w /var/log/btmp -p wa -k session + +# 6.3.3.12 Ensure login and logout events are collected +-w /var/log/lastlog -p wa -k logins +-w /var/run/faillock -p wa -k logins + +# 6.3.3.13 Ensure file deletion events by users are collected +-a always,exit -F arch=b64 -S rename,unlink,unlinkat,renameat -F auid>={{ uid_min.stdout }} -F auid!=unset -F key=delete +-a always,exit -F arch=b32 -S rename,unlink,unlinkat,renameat -F auid>={{ uid_min.stdout }} -F auid!=unset -F key=delete + +# 6.3.3.14 Ensure events that modify the system's Mandatory Access Controls are collected +-w /etc/apparmor/ -p wa -k MAC-policy +-w /etc/apparmor.d/ -p wa -k MAC-policy + +# 6.3.3.15 Ensure successful and unsuccessful attempts to use the chcon command are recorded +-a always,exit -F path=/usr/bin/chcon -F perm=x -F auid>={{ uid_min.stdout }} -F auid!=unset -k perm_chng + +# 6.3.3.16 Ensure successful and unsuccessful attempts to use the setfacl command are recorded +-a always,exit -F path=/usr/bin/setfacl -F perm=x -F auid>={{ uid_min.stdout }} -F auid!=unset -k perm_chng + +# 6.3.3.17 Ensure successful and unsuccessful attempts to use the chacl command are recorded +-a always,exit -F path=/usr/bin/chacl -F perm=x -F auid>={{ uid_min.stdout }} -F auid!=unset -k perm_chng + +# 6.3.3.18 Ensure successful and unsuccessful attempts to use the usermod command are recorded +-a always,exit -F path=/usr/sbin/usermod -F perm=x -F auid>={{ uid_min.stdout }} -F auid!=unset -k usermod + +# 6.3.3.19 Ensure kernel module loading unloading and modification is collected +-a always,exit -F arch=b64 -S init_module,finit_module,delete_module -F auid>={{ uid_min.stdout }} -F auid!=unset -k kernel_modules +-a always,exit -F path=/usr/bin/kmod -F perm=x -F auid>={{ uid_min.stdout }} -F auid!=unset -k kernel_modules + +# 6.3.3.20 Ensure the audit configuration is immutable +-e 2 diff --git a/data_safe_haven/resources/workspace/workspace.cloud_init.mustache.yaml b/data_safe_haven/resources/workspace/workspace.cloud_init.mustache.yaml new file mode 100644 index 0000000000..7137b94253 --- /dev/null +++ b/data_safe_haven/resources/workspace/workspace.cloud_init.mustache.yaml @@ -0,0 +1,154 @@ +#cloud-config + +write_files: + - path: "/etc/clamav/freshclam-mirror.conf" + permissions: "0400" + content: | + PrivateMirror {{ clamav_mirror_hostname }} + + - path: "/etc/database_credential" + permissions: "0400" + content: | + {{ database_service_admin_password }} + + - path: "/etc/nslcd.conf" + permissions: "0400" + content: | + # nslcd configuration file. 
+ # http://manpages.ubuntu.com/manpages/bionic/man5/nslcd.conf.5.html + + # Runtime options + uid nslcd + gid nslcd + log syslog debug + + # Do not allow uids lower than 2000 to login + nss_min_uid 2000 + + # General connection options + uri ldap://{{ldap_server_hostname}}:{{ldap_server_port}} + + # Search/mapping options + base {{ldap_user_search_base}} + base {{ldap_group_search_base}} + + # All users that are members of the correct group + filter passwd {{{ldap_user_filter}}} + map passwd loginShell "/bin/bash" + + # One group for each security group belonging to this SRE and for each primary user group for users that belong to a group in this SRE + filter group {{{ldap_group_filter}}} + + - path: "/etc/pip.conf" + permissions: "0444" + content: | + # Use the PyPI proxy for package installation + [global] + index = http://{{software_repository_hostname}}/repository/pypi-proxy + index-url = http://{{software_repository_hostname}}/repository/pypi-proxy/simple + trusted-host = {{software_repository_hostname}} + + - path: "/etc/R/Rprofile.site" + permissions: "0444" + content: | + # Use the CRAN proxy for package installation + local({ + r <- getOption("repos") + r["CRAN"] <- "http://{{software_repository_hostname}}/repository/cran-proxy" + options(repos = r) + }) + + - path: "/etc/systemd/system/desired-state.timer" + permissions: "0644" + content: | + [Unit] + Description=Run desired state script daily + + [Timer] + OnCalendar=daily + Persistent=true + + [Install] + WantedBy=timers.target + + - path: "/etc/systemd/system/desired-state.service" + permissions: "0644" + content: | + [Unit] + Description=Run desired state script + Wants=network-online.target + After=network-online.target + + [Service] + Type=oneshot + ExecStartPre=sleep 1m + ExecStart=/root/desired_state.sh + StandardOutput=journal+console + + - path: "/root/desired_state.sh" + permissions: "0700" + content: | + #!/usr/bin/env bash + pushd /desired_state + ansible-playbook /desired_state/desired_state.yaml + popd + +mounts: + # Desired state configuration is in a blob container mounted as NFSv3 + - ["{{storage_account_data_desired_state_name}}.blob.core.windows.net:/{{storage_account_data_desired_state_name}}/desiredstate", /desired_state, nfs, "ro,_netdev,sec=sys,vers=3,nolock,proto=tcp"] + # Secure data is in a blob container mounted as NFSv3 + - ["{{storage_account_data_private_sensitive_name}}.blob.core.windows.net:/{{storage_account_data_private_sensitive_name}}/ingress", /data, nfs, "ro,_netdev,sec=sys,vers=3,nolock,proto=tcp"] + - ["{{storage_account_data_private_sensitive_name}}.blob.core.windows.net:/{{storage_account_data_private_sensitive_name}}/egress", /output, nfs, "rw,_netdev,sec=sys,vers=3,nolock,proto=tcp"] + # User data is in a file share mounted as NFSv4 + - ["{{storage_account_data_private_user_name}}.file.core.windows.net:/{{storage_account_data_private_user_name}}/shared", /shared, nfs, "_netdev,sec=sys,nconnect=4"] + - ["{{storage_account_data_private_user_name}}.file.core.windows.net:/{{storage_account_data_private_user_name}}/home", /home, nfs, "_netdev,sec=sys,nconnect=4"] + +# Add additional apt repositories +apt: + debconf_selections: + msodbcsql17: msodbcsql17 msodbcsql/ACCEPT_EULA boolean true + http_proxy: http://{{apt_proxy_server_hostname}}:8000 + https_proxy: http://{{apt_proxy_server_hostname}}:8000 + preserve_sources_list: true # Append to the existing /etc/apt/sources.list + sources: + microsoft-general.list: + source: "deb https://packages.microsoft.com/ubuntu/22.04/prod jammy main" + 
keyid: BC528686B50D79E339D3721CEB3E94ADBE1229CF # Microsoft (Release signing) + +# Install necessary apt packages +packages: + - ansible + - libnss-ldapd # LDAP login + - libpam-ldapd # LDAP login + - ldap-utils # LDAP login + - nfs-common # Mounting shared volumes +package_update: true +package_upgrade: true + +runcmd: + # Restart services + - systemctl restart nslcd + + # Mount all external volumes + # -------------------------- + - echo ">=== Mounting all external volumes... ===<" + - grep -v -e '^[[:space:]]*$' /etc/fstab | sed 's|^| /etc/fstab |' + - mount -fav + - while (! mountpoint -q /data); do sleep 5; mount /data; done + - while (! mountpoint -q /desired_state); do sleep 5; mount /desired_state; done + - while (! mountpoint -q /home); do sleep 5; mount /home; done + - while (! mountpoint -q /output); do sleep 5; mount /output; done + - while (! mountpoint -q /shared); do sleep 5; mount /shared; done + - findmnt + + # Enable and start desired state timer + # ------------------------------------ + - echo ">=== Enabling desired state timer... ===<" + - systemctl daemon-reload + - systemctl enable desired-state.timer + - systemctl start desired-state.timer + + # Run desired state service + # ------------------------- + - echo ">=== Running initial desired state configuration... ===<" + - systemctl start desired-state diff --git a/data_safe_haven/serialisers/__init__.py b/data_safe_haven/serialisers/__init__.py new file mode 100644 index 0000000000..bc50594ad1 --- /dev/null +++ b/data_safe_haven/serialisers/__init__.py @@ -0,0 +1,9 @@ +from .azure_serialisable_model import AzureSerialisableModel +from .context_base import ContextBase +from .yaml_serialisable_model import YAMLSerialisableModel + +__all__ = [ + "AzureSerialisableModel", + "ContextBase", + "YAMLSerialisableModel", +] diff --git a/data_safe_haven/serialisers/azure_serialisable_model.py b/data_safe_haven/serialisers/azure_serialisable_model.py new file mode 100644 index 0000000000..389522376f --- /dev/null +++ b/data_safe_haven/serialisers/azure_serialisable_model.py @@ -0,0 +1,101 @@ +"""A YAMLSerialisableModel that can be serialised to and from Azure""" + +from typing import Any, ClassVar, TypeVar + +from data_safe_haven.exceptions import ( + DataSafeHavenAzureError, + DataSafeHavenAzureStorageError, + DataSafeHavenError, +) +from data_safe_haven.external import AzureSdk + +from .context_base import ContextBase +from .yaml_serialisable_model import YAMLSerialisableModel + +T = TypeVar("T", bound="AzureSerialisableModel") + + +class AzureSerialisableModel(YAMLSerialisableModel): + """Base class for configuration that can be written to Azure storage""" + + config_type: ClassVar[str] = "AzureSerialisableModel" + default_filename: ClassVar[str] = "config.yaml" + + @classmethod + def from_remote( + cls: type[T], context: ContextBase, *, filename: str | None = None + ) -> T: + """ + Construct an AzureSerialisableModel from a YAML file in Azure storage. + + Raises: + DataSafeHavenAzureError: if the file cannot be loaded + DataSafeHavenAzureStorageError: if the storage account does not exist + """ + try: + azure_sdk = AzureSdk(subscription_name=context.subscription_name) + config_yaml = azure_sdk.download_blob( + filename or cls.default_filename, + context.resource_group_name, + context.storage_account_name, + context.storage_container_name, + ) + return cls.from_yaml(config_yaml) + except DataSafeHavenAzureStorageError as exc: + msg = f"Storage account '{context.storage_account_name}' does not exist." 
+ raise DataSafeHavenAzureStorageError(msg) from exc + except DataSafeHavenError as exc: + msg = f"Could not load file '{filename or cls.default_filename}' from Azure storage." + raise DataSafeHavenAzureError(msg) from exc + + @classmethod + def from_remote_or_create( + cls: type[T], context: ContextBase, **default_args: Any + ) -> T: + """ + Construct an AzureSerialisableModel from a YAML file in Azure storage, or from + default arguments if no such file exists. + """ + if cls.remote_exists(context): + return cls.from_remote(context) + else: + return cls(**default_args) + + @classmethod + def remote_exists( + cls: type[T], context: ContextBase, *, filename: str | None = None + ) -> bool: + """Check whether a remote instance of this model exists.""" + azure_sdk = AzureSdk(subscription_name=context.subscription_name) + if azure_sdk.storage_exists(context.storage_account_name): + return azure_sdk.blob_exists( + filename or cls.default_filename, + context.resource_group_name, + context.storage_account_name, + context.storage_container_name, + ) + else: + return False + + def remote_yaml_diff( + self: T, context: ContextBase, *, filename: str | None = None + ) -> list[str]: + """ + Determine the diff of YAML output from the remote model to `self`. + + The diff is given in unified diff format. + """ + remote_model = self.from_remote(context, filename=filename) + + return self.yaml_diff(remote_model, from_name="remote", to_name="local") + + def upload(self: T, context: ContextBase, *, filename: str | None = None) -> None: + """Serialise an AzureSerialisableModel to a YAML file in Azure storage.""" + azure_sdk = AzureSdk(subscription_name=context.subscription_name) + azure_sdk.upload_blob( + self.to_yaml(), + filename or self.default_filename, + context.resource_group_name, + context.storage_account_name, + context.storage_container_name, + ) diff --git a/data_safe_haven/serialisers/context_base.py b/data_safe_haven/serialisers/context_base.py new file mode 100644 index 0000000000..b0cf9d3c24 --- /dev/null +++ b/data_safe_haven/serialisers/context_base.py @@ -0,0 +1,20 @@ +from abc import ABC, abstractmethod +from typing import ClassVar + +from data_safe_haven.types import AzureSubscriptionName, EntraGroupName + + +class ContextBase(ABC): + admin_group_name: EntraGroupName + subscription_name: AzureSubscriptionName + storage_container_name: ClassVar[str] + + @property + @abstractmethod + def resource_group_name(self) -> str: + pass + + @property + @abstractmethod + def storage_account_name(self) -> str: + pass diff --git a/data_safe_haven/serialisers/yaml_serialisable_model.py b/data_safe_haven/serialisers/yaml_serialisable_model.py new file mode 100644 index 0000000000..766a75bd08 --- /dev/null +++ b/data_safe_haven/serialisers/yaml_serialisable_model.py @@ -0,0 +1,83 @@ +"""A pydantic BaseModel that can be serialised to and from YAML""" + +from difflib import unified_diff +from pathlib import Path +from typing import ClassVar, TypeVar + +import yaml +from pydantic import BaseModel, ValidationError + +from data_safe_haven.exceptions import DataSafeHavenConfigError, DataSafeHavenTypeError +from data_safe_haven.types import PathType + +T = TypeVar("T", bound="YAMLSerialisableModel") + + +class YAMLSerialisableModel(BaseModel, validate_assignment=True): + """ + A pydantic BaseModel that can be serialised to and from YAML + """ + + config_type: ClassVar[str] = "YAMLSerialisableModel" + + @classmethod + def from_filepath(cls: type[T], config_file_path: PathType) -> T: + """Construct a 
YAMLSerialisableModel from a YAML file""" + try: + with open(Path(config_file_path), encoding="utf-8") as f_yaml: + settings_yaml = f_yaml.read() + return cls.from_yaml(settings_yaml) + except FileNotFoundError as exc: + msg = f"Could not find file {config_file_path}." + raise DataSafeHavenConfigError(msg) from exc + + @classmethod + def from_yaml(cls: type[T], settings_yaml: str) -> T: + """Construct a YAMLSerialisableModel from a YAML string""" + try: + settings_dict = yaml.safe_load(settings_yaml) + except yaml.YAMLError as exc: + msg = f"Could not parse {cls.config_type} configuration as YAML." + raise DataSafeHavenConfigError(msg) from exc + + if not isinstance(settings_dict, dict): + msg = f"Unable to parse {cls.config_type} configuration as a dict." + raise DataSafeHavenConfigError(msg) + + try: + return cls.model_validate(settings_dict) + except ValidationError as exc: + msg = f"Could not load {cls.config_type} configuration." + raise DataSafeHavenTypeError(msg) from exc + + def to_filepath(self, config_file_path: PathType) -> None: + """Serialise a YAMLSerialisableModel to a YAML file""" + # Create the parent directory if it does not exist then write YAML + _config_file_path = Path(config_file_path) + _config_file_path.parent.mkdir(parents=True, exist_ok=True) + + with open(_config_file_path, "w", encoding="utf-8") as f_yaml: + f_yaml.write(self.to_yaml()) + + def to_yaml(self, *, warnings: bool = True) -> str: + """Serialise a YAMLSerialisableModel to a YAML string""" + return yaml.dump( + self.model_dump(by_alias=True, mode="json", warnings=warnings), indent=2 + ) + + def yaml_diff( + self, other: T, from_name: str = "other", to_name: str = "self" + ) -> list[str]: + """ + Determine the diff of YAML output from `other` to `self`. + + The diff is given in unified diff format. 
+ """ + return list( + unified_diff( + other.to_yaml().splitlines(keepends=True), + self.to_yaml().splitlines(keepends=True), + fromfile=from_name, + tofile=to_name, + ) + ) diff --git a/data_safe_haven/singleton.py b/data_safe_haven/singleton.py new file mode 100644 index 0000000000..137917f140 --- /dev/null +++ b/data_safe_haven/singleton.py @@ -0,0 +1,15 @@ +"""Definition of a Singleton metaclass""" + +from typing import Any, Generic, TypeVar + +T = TypeVar("T") + + +class Singleton(type, Generic[T]): + # It is not possible to wrap generics in ClassVar (https://github.com/python/mypy/issues/5144) + _instances: dict["Singleton[T]", T] = {} # noqa: RUF012 + + def __call__(cls, *args: Any, **kwargs: Any) -> T: + if cls not in cls._instances: + cls._instances[cls] = super().__call__(*args, **kwargs) + return cls._instances[cls] diff --git a/data_safe_haven/types/__init__.py b/data_safe_haven/types/__init__.py new file mode 100644 index 0000000000..f304520c9c --- /dev/null +++ b/data_safe_haven/types/__init__.py @@ -0,0 +1,49 @@ +from .annotated_types import ( + AzureLocation, + AzureSubscriptionName, + AzureVmSku, + EmailAddress, + EntraGroupName, + Fqdn, + Guid, + IpAddress, + SafeString, + TimeZone, + UniqueList, +) +from .enums import ( + AzureDnsZoneNames, + AzureSdkCredentialScope, + DatabaseSystem, + FirewallPriorities, + ForbiddenDomains, + NetworkingPriorities, + PermittedDomains, + Ports, + SoftwarePackageCategory, +) +from .types import PathType + +__all__ = [ + "AzureDnsZoneNames", + "AzureLocation", + "AzureSdkCredentialScope", + "AzureSubscriptionName", + "AzureVmSku", + "DatabaseSystem", + "EmailAddress", + "EntraGroupName", + "FirewallPriorities", + "ForbiddenDomains", + "Fqdn", + "Guid", + "IpAddress", + "NetworkingPriorities", + "PathType", + "PermittedDomains", + "Ports", + "SafeString", + "SoftwarePackageCategory", + "TimeZone", + "UniqueList", +] diff --git a/data_safe_haven/types/annotated_types.py b/data_safe_haven/types/annotated_types.py new file mode 100644 index 0000000000..ce07b4506e --- /dev/null +++ b/data_safe_haven/types/annotated_types.py @@ -0,0 +1,29 @@ +from collections.abc import Hashable +from typing import Annotated, TypeAlias, TypeVar + +from pydantic import Field +from pydantic.functional_validators import AfterValidator + +from data_safe_haven import validators + +AzureShortName = Annotated[str, Field(min_length=1, max_length=24)] +AzureSubscriptionName = Annotated[ + str, + Field(min_length=1, max_length=80), + AfterValidator(validators.azure_subscription_name), +] +AzureLocation = Annotated[str, AfterValidator(validators.azure_location)] +AzureVmSku = Annotated[str, AfterValidator(validators.azure_vm_sku)] +EmailAddress = Annotated[str, AfterValidator(validators.email_address)] +EntraGroupName = Annotated[str, AfterValidator(validators.entra_group_name)] +Fqdn = Annotated[str, AfterValidator(validators.fqdn)] +Guid = Annotated[str, AfterValidator(validators.aad_guid)] +IpAddress = Annotated[str, AfterValidator(validators.ip_address)] +SafeString = Annotated[str, AfterValidator(validators.safe_string)] +TimeZone = Annotated[str, AfterValidator(validators.timezone)] +TH = TypeVar("TH", bound=Hashable) +# type UniqueList[TH] = Annotated[list[TH], AfterValidator(validators.unique_list)] +# mypy doesn't support PEP695 type statements +UniqueList: TypeAlias = Annotated[ # noqa:UP040 + list[TH], AfterValidator(validators.unique_list) +] diff --git a/data_safe_haven/types/enums.py b/data_safe_haven/types/enums.py new file mode 100644 index 
0000000000..170cbba4a0 --- /dev/null +++ b/data_safe_haven/types/enums.py @@ -0,0 +1,189 @@ +from enum import UNIQUE, Enum, verify + + +@verify(UNIQUE) +class AzureDnsZoneNames(tuple[str, ...], Enum): + """ + Return a list of DNS zones used by a given Azure resource type. + See https://learn.microsoft.com/en-us/azure/private-link/private-endpoint-dns for details. + """ + + AZURE_MONITOR = ( + "agentsvc.azure-automation.net", + "blob.core.windows.net", + "monitor.azure.com", + "ods.opinsights.azure.com", + "oms.opinsights.azure.com", + ) + STORAGE_ACCOUNT = ("blob.core.windows.net", "file.core.windows.net") + ALL = tuple(sorted(set(AZURE_MONITOR + STORAGE_ACCOUNT))) + + +@verify(UNIQUE) +class AzureSdkCredentialScope(str, Enum): + DEFAULT = "https://management.azure.com/.default" + GRAPH_API = "https://graph.microsoft.com/.default" + KEY_VAULT = "https://vault.azure.net" + + +@verify(UNIQUE) +class DatabaseSystem(str, Enum): + MICROSOFT_SQL_SERVER = "mssql" + POSTGRESQL = "postgresql" + + +@verify(UNIQUE) +class FirewallPriorities(int, Enum): + """Priorities for firewall rules.""" + + # All sources: 1000-1099 + ALL = 1000 + # SHM sources: 2000-2999 + SHM_IDENTITY_SERVERS = 2000 + # SRE sources: 3000-3999 + SRE_APT_PROXY_SERVER = 3000 + SRE_CLAMAV_MIRROR = 3100 + SRE_GUACAMOLE_CONTAINERS = 3200 + SRE_IDENTITY_CONTAINERS = 3300 + SRE_USER_SERVICES_SOFTWARE_REPOSITORIES = 3400 + SRE_WORKSPACES = 3500 + SRE_WORKSPACES_DENY = 3550 + + +@verify(UNIQUE) +class ForbiddenDomains(tuple[str, ...], Enum): + # Block snap upload to the Snap store at snapcraft.io + # Upload is through dashboard.snapscraft.io and requires a client to be logged in to + # an Ubuntu account. + # Login is through login.ubuntu.com. + # However, once successfully authorised, it is not necessary to reach + # login.ubuntu.com before uploading. + # Therefore we should block access to both domains. 
+ UBUNTU_SNAPCRAFT = ( + "dashboard.snapcraft.io", # upload endpoint + "login.ubuntu.com", # login endpoint (provides auth for upload) + "upload.apps.ubuntu.com", + ) + + +@verify(UNIQUE) +class NetworkingPriorities(int, Enum): + """Priorities for network security group rules.""" + + # Azure services: 100 - 999 + AZURE_GATEWAY_MANAGER = 100 + AZURE_LOAD_BALANCER = 200 + AZURE_MONITORING_SOURCES = 300 + AZURE_PLATFORM_DNS = 400 + # DNS connections: 1000-1499 + INTERNAL_SRE_DNS_SERVERS = 1000 + # SRE connections: 1500-2999 + INTERNAL_SRE_SELF = 1500 + INTERNAL_SRE_APPLICATION_GATEWAY = 1600 + INTERNAL_SRE_APT_PROXY_SERVER = 1700 + INTERNAL_SRE_CLAMAV_MIRROR = 1800 + INTERNAL_SRE_DATA_CONFIGURATION = 1900 + INTERNAL_SRE_DATA_DESIRED_STATE = 1910 + INTERNAL_SRE_DATA_PRIVATE = 1920 + INTERNAL_SRE_GUACAMOLE_CONTAINERS = 2000 + INTERNAL_SRE_GUACAMOLE_CONTAINERS_SUPPORT = 2100 + INTERNAL_SRE_IDENTITY_CONTAINERS = 2200 + INTERNAL_SRE_MONITORING_TOOLS = 2300 + INTERNAL_SRE_USER_SERVICES_CONTAINERS = 2400 + INTERNAL_SRE_USER_SERVICES_CONTAINERS_SUPPORT = 2500 + INTERNAL_SRE_USER_SERVICES_DATABASES = 2600 + INTERNAL_SRE_USER_SERVICES_SOFTWARE_REPOSITORIES = 2700 + INTERNAL_SRE_WORKSPACES = 2800 + INTERNAL_SRE_ANY = 2999 + # Authorised external IPs: 3000-3499 + AUTHORISED_EXTERNAL_USER_IPS = 3100 + AUTHORISED_EXTERNAL_SSL_LABS_IPS = 3200 + # Wider internet: 3500-3999 + EXTERNAL_LINUX_UPDATES = 3600 + EXTERNAL_INTERNET = 3999 + # Deny all other: 4096 + ALL_OTHER = 4096 + + +@verify(UNIQUE) +class PermittedDomains(tuple[str, ...], Enum): + """Permitted domains for outbound connections.""" + + APT_REPOSITORIES = ( + # "apt.postgresql.org", + "archive.ubuntu.com", + "azure.archive.ubuntu.com", + "changelogs.ubuntu.com", + "cloudapp.azure.com", # this is where azure.archive.ubuntu.com is hosted + "deb.debian.org", + # "d20rj4el6vkp4c.cloudfront.net", + # "dbeaver.io", + # "packages.gitlab.com", + "packages.microsoft.com", + # "qgis.org", + "security.ubuntu.com", + # "ubuntu.qgis.org" + ) + AZURE_DNS_ZONES = AzureDnsZoneNames.ALL + CLAMAV_UPDATES = ( + "clamav.net", + "current.cvd.clamav.net", + "database.clamav.net.cdn.cloudflare.net", + "database.clamav.net", + ) + MICROSOFT_GRAPH_API = ("graph.microsoft.com",) + MICROSOFT_LOGIN = ("login.microsoftonline.com",) + MICROSOFT_IDENTITY = MICROSOFT_GRAPH_API + MICROSOFT_LOGIN + RSTUDIO_DEB = ("download1.rstudio.org",) + SOFTWARE_REPOSITORIES_PYTHON = ( + "files.pythonhosted.org", + "pypi.org", + ) + SOFTWARE_REPOSITORIES_R = ("cran.r-project.org",) + SOFTWARE_REPOSITORIES = SOFTWARE_REPOSITORIES_PYTHON + SOFTWARE_REPOSITORIES_R + UBUNTU_KEYSERVER = ("keyserver.ubuntu.com",) + UBUNTU_SNAPCRAFT = ( + "api.snapcraft.io", + "*.snapcraftcontent.com", + ) + ALL = tuple( + sorted( + set( + APT_REPOSITORIES + + AZURE_DNS_ZONES + + CLAMAV_UPDATES + + MICROSOFT_GRAPH_API + + MICROSOFT_LOGIN + + RSTUDIO_DEB + + SOFTWARE_REPOSITORIES_PYTHON + + SOFTWARE_REPOSITORIES_R + + UBUNTU_KEYSERVER + + UBUNTU_SNAPCRAFT + ) + ) + ) + + +@verify(UNIQUE) +class Ports(str, Enum): + AZURE_MONITORING = "514" + DNS = "53" + HKP = "11371" + HTTP = "80" + HTTPS = "443" + LDAP_APRICOT = "1389" + LINUX_UPDATE = "8000" + MSSQL = "1433" + NEXUS = "8081" + NTP = "123" + POSTGRESQL = "5432" + RDP = "3389" + SSH = "22" + SQUID = "3128" + + +@verify(UNIQUE) +class SoftwarePackageCategory(str, Enum): + ANY = "any" + PRE_APPROVED = "pre-approved" + NONE = "none" diff --git a/data_safe_haven/types/types.py b/data_safe_haven/types/types.py new file mode 100644 index 0000000000..527e4e2ea3 --- /dev/null 
+++ b/data_safe_haven/types/types.py @@ -0,0 +1,3 @@ +from pathlib import Path + +PathType = str | Path diff --git a/data_safe_haven/utility/__init__.py b/data_safe_haven/utility/__init__.py new file mode 100644 index 0000000000..e523a00bb4 --- /dev/null +++ b/data_safe_haven/utility/__init__.py @@ -0,0 +1,5 @@ +from .file_reader import FileReader + +__all__ = [ + "FileReader", +] diff --git a/data_safe_haven/utility/file_reader.py b/data_safe_haven/utility/file_reader.py new file mode 100644 index 0000000000..8dd6a2387e --- /dev/null +++ b/data_safe_haven/utility/file_reader.py @@ -0,0 +1,33 @@ +"""Read local files, handling template expansion if needed""" + +import pathlib +from typing import Any + +import chevron + +from data_safe_haven.functions import sha256hash +from data_safe_haven.types import PathType + + +class FileReader: + """Read local files, handling template expansion if needed""" + + def __init__(self, file_path: PathType, *args: Any, **kwargs: Any): + self.file_path = pathlib.Path(file_path).resolve() + super().__init__(*args, **kwargs) + + @property + def name(self) -> str: + return self.file_path.name.replace(".mustache", "") + + def file_contents(self, mustache_values: dict[str, Any] | None = None) -> str: + """Read a local file into a string, expanding template values""" + with open(self.file_path, encoding="utf-8") as source_file: + if mustache_values: + contents = chevron.render(source_file, mustache_values) + else: + contents = source_file.read() + return contents + + def sha256(self) -> str: + return sha256hash(self.file_contents()) diff --git a/data_safe_haven/validators/__init__.py b/data_safe_haven/validators/__init__.py new file mode 100644 index 0000000000..849b199857 --- /dev/null +++ b/data_safe_haven/validators/__init__.py @@ -0,0 +1,47 @@ +from .typer import ( + typer_aad_guid, + typer_azure_subscription_name, + typer_azure_vm_sku, + typer_email_address, + typer_entra_group_name, + typer_fqdn, + typer_ip_address, + typer_safe_string, + typer_timezone, +) +from .validators import ( + aad_guid, + azure_location, + azure_subscription_name, + azure_vm_sku, + email_address, + entra_group_name, + fqdn, + ip_address, + safe_string, + timezone, + unique_list, +) + +__all__ = [ + "aad_guid", + "azure_location", + "azure_subscription_name", + "azure_vm_sku", + "email_address", + "entra_group_name", + "fqdn", + "ip_address", + "safe_string", + "timezone", + "typer_aad_guid", + "typer_azure_subscription_name", + "typer_azure_vm_sku", + "typer_email_address", + "typer_entra_group_name", + "typer_fqdn", + "typer_ip_address", + "typer_safe_string", + "typer_timezone", + "unique_list", +] diff --git a/data_safe_haven/validators/typer.py b/data_safe_haven/validators/typer.py new file mode 100644 index 0000000000..f1c8239ecc --- /dev/null +++ b/data_safe_haven/validators/typer.py @@ -0,0 +1,37 @@ +from collections.abc import Callable +from typing import Any + +from typer import BadParameter + +from . 
import validators + + +def typer_validator_factory(validator: Callable[[Any], Any]) -> Callable[[Any], Any]: + """Factory to create validation functions for Typer from Pydantic validators""" + + def typer_validator(x: Any) -> Any: + # Return unused optional arguments + if x is None: + return x + + # Validate input, catching ValueError to raise Typer Exception + try: + validator(x) + return x + except ValueError as exc: + raise BadParameter(str(exc)) from exc + + return typer_validator + + +typer_aad_guid = typer_validator_factory(validators.aad_guid) +typer_azure_subscription_name = typer_validator_factory( + validators.azure_subscription_name +) +typer_azure_vm_sku = typer_validator_factory(validators.azure_vm_sku) +typer_email_address = typer_validator_factory(validators.email_address) +typer_entra_group_name = typer_validator_factory(validators.entra_group_name) +typer_fqdn = typer_validator_factory(validators.fqdn) +typer_ip_address = typer_validator_factory(validators.ip_address) +typer_safe_string = typer_validator_factory(validators.safe_string) +typer_timezone = typer_validator_factory(validators.timezone) diff --git a/data_safe_haven/validators/validators.py b/data_safe_haven/validators/validators.py new file mode 100644 index 0000000000..dd4458ec57 --- /dev/null +++ b/data_safe_haven/validators/validators.py @@ -0,0 +1,152 @@ +import ipaddress +import re +from collections.abc import Hashable +from typing import TypeVar + +import pytz +from fqdn import FQDN + + +def aad_guid(aad_guid: str) -> str: + if not re.match( + r"^[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}$", + aad_guid, + ): + msg = "Expected GUID, for example '10de18e7-b238-6f1e-a4ad-772708929203'." + raise ValueError(msg) + return aad_guid + + +def azure_location(azure_location: str) -> str: + # Generate a list of locations with the following command: + # `az account list-locations --query "[?metadata.regionType == 'Physical'].name"` + locations = [ + "australiacentral", + "australiacentral2", + "australiaeast", + "australiasoutheast", + "brazilsouth", + "brazilsoutheast", + "brazilus", + "canadacentral", + "canadaeast", + "centralindia", + "centralus", + "centraluseuap", + "eastasia", + "eastus", + "eastus2", + "eastus2euap", + "eastusstg", + "francecentral", + "francesouth", + "germanynorth", + "germanywestcentral", + "israelcentral", + "italynorth", + "japaneast", + "japanwest", + "jioindiacentral", + "jioindiawest", + "koreacentral", + "koreasouth", + "mexicocentral", + "northcentralus", + "northeurope", + "norwayeast", + "norwaywest", + "polandcentral", + "qatarcentral", + "southafricanorth", + "southafricawest", + "southcentralus", + "southeastasia", + "southindia", + "spaincentral", + "swedencentral", + "switzerlandnorth", + "switzerlandwest", + "uaecentral", + "uaenorth", + "uksouth", + "ukwest", + "westcentralus", + "westeurope", + "westindia", + "westus", + "westus2", + "westus3", + ] + if azure_location not in locations: + msg = "Expected valid Azure location, for example 'uksouth'." + raise ValueError(msg) + return azure_location + + +def azure_subscription_name(subscription_name: str) -> str: + # https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-name-rules + if not re.match(r"^[a-zA-Z0-9\- \[\]]+$", subscription_name): + msg = "Azure subscription names can only contain alphanumeric characters, spaces and particular special characters." 
+ raise ValueError(msg) + return subscription_name + + +def azure_vm_sku(azure_vm_sku: str) -> str: + if not re.match(r"^(Standard|Basic)_\w+$", azure_vm_sku): + msg = "Expected valid Azure VM SKU, for example 'Standard_D2s_v4'." + raise ValueError(msg) + return azure_vm_sku + + +def fqdn(domain: str) -> str: + trial_fqdn = FQDN(domain) + if not trial_fqdn.is_valid: + msg = "Expected valid fully qualified domain name, for example 'example.com'." + raise ValueError(msg) + return domain + + +def email_address(email_address: str) -> str: + if not re.match(r"^\S+@\S+$", email_address): + msg = "Expected valid email address, for example 'sherlock@holmes.com'." + raise ValueError(msg) + return email_address + + +def entra_group_name(entra_group_name: str) -> str: + if entra_group_name.startswith(" "): + msg = "Entra group names cannot start with a space." + raise ValueError(msg) + return entra_group_name + + +def ip_address(ip_address: str) -> str: + try: + return str(ipaddress.ip_network(ip_address)) + except Exception as exc: + msg = "Expected valid IPv4 address, for example '1.1.1.1'." + raise ValueError(msg) from exc + + +def safe_string(safe_string: str) -> str: + if not re.match(r"^[a-zA-Z0-9_-]*$", safe_string) or not safe_string: + msg = "Expected valid string containing only letters, numbers, hyphens and underscores." + raise ValueError(msg) + return safe_string + + +def timezone(timezone: str) -> str: + if timezone not in pytz.all_timezones: + msg = "Expected valid timezone, for example 'Europe/London'." + raise ValueError(msg) + return timezone + + +TH = TypeVar("TH", bound=Hashable) + + +def unique_list(items: list[TH]) -> list[TH]: + if len(items) != len(set(items)): + msg = "All items must be unique." + raise ValueError(msg) + return items diff --git a/data_safe_haven/version.py b/data_safe_haven/version.py new file mode 100644 index 0000000000..bfb9e4b4b8 --- /dev/null +++ b/data_safe_haven/version.py @@ -0,0 +1,2 @@ +__version__ = "5.0.0" +__version_info__ = tuple(__version__.split(".")) diff --git a/deployment/CheckRequirements.ps1 b/deployment/CheckRequirements.ps1 deleted file mode 100644 index e49e3c9fc1..0000000000 --- a/deployment/CheckRequirements.ps1 +++ /dev/null @@ -1,71 +0,0 @@ -param ( - [Parameter(Mandatory = $false, HelpMessage = "If this is set, install any missing modules, otherwise warn.")] - [switch]$InstallMissing, - [Parameter(Mandatory = $false, HelpMessage = "If this is set, install any dev modules, otherwise ignore them.")] - [switch]$IncludeDev -) - -Import-Module $PSScriptRoot/common/Logging -Force -ErrorAction Stop - -# Requirements -$PowershellSupportedVersion = "7.4.1" -$ModuleVersionRequired = @{ - "Az.Accounts" = @("ge", "2.11.1") - "Az.Automation" = @("ge", "1.9.0") - "Az.Compute" = @("ge", "5.3.0") - "Az.DataProtection" = @("ge", "2.4.0") - "Az.Dns" = @("ge", "1.1.2") - "Az.KeyVault" = @("ge", "4.9.1") - "Az.Monitor" = @("ge", "4.2.0") - "Az.MonitoringSolutions" = @("ge", "0.1.0") - "Az.Network" = @("ge", "5.3.0") - "Az.OperationalInsights" = @("ge", "3.1.0") - "Az.PrivateDns" = @("ge", "1.0.3") - "Az.RecoveryServices" = @("ge", "5.4.1") - "Az.Resources" = @("ge", "6.5.1") - "Az.Storage" = @("ge", "4.7.0") - "Microsoft.Graph.Authentication" = @("ge", "1.21.0") - "Microsoft.Graph.Applications" = @("ge", "1.21.0") - "Microsoft.Graph.Identity.DirectoryManagement" = @("ge", "1.21.0") - "Microsoft.Graph.Users" = @("ge", "1.21.0") - "Posh-ACME" = @("ge", "4.23.0") - "Poshstache" = @("ge", "0.1.10") - "Powershell-Yaml" = @("ge", "0.4.2") -} -if 
($IncludeDev.IsPresent) { - $ModuleVersionRequired["Pester"] = ("ge", "5.1.0") - $ModuleVersionRequired["PSScriptAnalyzer"] = ("ge", "1.19.0") -} - -# Powershell version -$PowershellVersion = $PSVersionTable.PSVersion -if ($PowershellVersion -ne $PowershellSupportedVersion) { - Add-LogMessage -Level Warning "Powershell version: $PowershellVersion" - Add-LogMessage -Level Warning "The currently supported version of Powershell is $PowershellSupportedVersion." - Add-LogMessage -Level Warning "In case of errors originating from Powershell code, ensure that you are running the currently supported version." -} else { - Add-LogMessage -Level Success "Powershell version: $PowershellVersion" -} - - -# Powershell modules -$RepositoryName = "PSGallery" -foreach ($ModuleName in $ModuleVersionRequired.Keys) { - $RequirementType, $RequiredVersion = $ModuleVersionRequired[$ModuleName] - if ($RequirementType -eq "eq") { - $CurrentVersion = (Get-Module $ModuleName -ListAvailable | Where-Object { $_.Version -eq $RequiredVersion } | Select-Object -First 1).Version - } elseif ($RequirementType -eq "ge") { - $CurrentVersion = (Get-Module $ModuleName -ListAvailable | Where-Object { $_.Version -ge $RequiredVersion } | Select-Object -First 1).Version - } else { - Add-LogMessage -Level Fatal "Did not recognise requirement: '$ModuleName $RequirementType $RequiredVersion'" - } - if ($CurrentVersion -ge $RequiredVersion) { - Add-LogMessage -Level Success "$ModuleName module version: $CurrentVersion" - } elseif ($InstallMissing.IsPresent) { - Add-LogMessage -Level Info "Installing $ModuleName module version: $RequiredVersion..." - Install-Module -Name $ModuleName -RequiredVersion $RequiredVersion -Repository $RepositoryName -Scope CurrentUser - } else { - Add-LogMessage -Level Warning "$ModuleName module version ($CurrentVersion) does not meet the minimum requirement: $RequiredVersion!" - Add-LogMessage -Level Info "Please update the $ModuleName module using: Install-Module -Name $ModuleName -RequiredVersion $RequiredVersion -Repository $RepositoryName" - } -} diff --git a/deployment/ShowConfigFile.ps1 b/deployment/ShowConfigFile.ps1 deleted file mode 100644 index 5f73c7a52f..0000000000 --- a/deployment/ShowConfigFile.ps1 +++ /dev/null @@ -1,17 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $false, HelpMessage = "Enter SRE ID (e.g. 'sandbox')")] - [string]$sreId = $null -) - -Import-Module $PSScriptRoot/common/Configuration -Force -ErrorAction Stop - - -# Generate and return the full config for the SHM or SRE -if ($sreId) { - $config = Get-SreConfig -shmId $shmId -sreId $sreId -} else { - $config = Get-ShmConfig -shmId $shmId -} -Write-Output ($config | ConvertTo-Json -Depth 99) diff --git a/deployment/administration/SHM_Add_AAD_Licences.ps1 b/deployment/administration/SHM_Add_AAD_Licences.ps1 deleted file mode 100644 index 1d25d55e80..0000000000 --- a/deployment/administration/SHM_Add_AAD_Licences.ps1 +++ /dev/null @@ -1,41 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 
'project')")] - [string]$shmId, - [Parameter(Mandatory = $false, HelpMessage = "SKU for the licence you want to assign")] - [string]$licenceSku = "AAD_PREMIUM" -) - -Import-Module Microsoft.Graph.Authentication -ErrorAction Stop -Import-Module $PSScriptRoot/../common/Logging -Force -ErrorAction Stop - - -# Get config -# ---------- -$config = Get-ShmConfig -shmId $shmId - - -# Connect to Microsoft Graph -# -------------------------- -if (-not (Get-MgContext)) { - Add-LogMessage -Level Info "Attempting to authenticate with Microsoft Graph. Please sign in with an account with admin rights over the Azure Active Directory you plan to use." - Connect-MgGraph -TenantId $config.azureAdTenantId -Scopes "Directory.ReadWrite.All" -ErrorAction Stop -ContextScope Process -} -if (Get-MgContext) { - Add-LogMessage -Level Success "Authenticated with Microsoft Graph" -} else { - Add-LogMessage -Level Fatal "Failed to authenticate with Microsoft Graph" -} - - -# Get the appropriate licence -# --------------------------- -$LicenceSkuId = (Get-MgSubscribedSku | Where-Object -Property SkuPartNumber -Value $licenceSku -EQ).SkuId -Add-LogMessage -Level Info "Preparing to add licence '$licenceSku' ($($Licence.SkuId)) to unlicenced users" - - -# Find all users without assigned licences who have an OnPremisesSecurityIdentifier. -# This indicates that they originated from a local AD. -# ---------------------------------------------------------------------------------- -$unlicensedUsers = Get-MgUser | Where-Object { -Not $_.AssignedLicenses } | Where-Object { $_.OnPremisesSecurityIdentifier } -Add-LogMessage -Level Info "Assigning licences to $($unlicensedUsers.Count) unlicenced users" -$unlicensedUsers | ForEach-Object { Set-MgUserLicense -UserId $_.Id -AddLicenses @{SkuId = $LicenceSkuId } -RemoveLicenses @() } diff --git a/deployment/administration/SHM_Delete_Unassigned_Users.ps1 b/deployment/administration/SHM_Delete_Unassigned_Users.ps1 deleted file mode 100644 index 3b551a997b..0000000000 --- a/deployment/administration/SHM_Delete_Unassigned_Users.ps1 +++ /dev/null @@ -1,33 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $false, HelpMessage = "No-op mode which will not remove anything")] - [Switch]$dryRun -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module $PSScriptRoot/../common/AzureCompute -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/Logging -Force -ErrorAction Stop - -# Get config -# ------------------------------- -$config = Get-ShmConfig -shmId $shmId -$originalContext = Get-AzContext - -# Delete users not currently in a security group -# ---------------------------------------------- -$null = Set-AzContext -SubscriptionId $config.subscriptionName -ErrorAction Stop -$script = "remote/Delete_Unassigned_Users.ps1" - -# Passing a param to a remote script requires it to be a string -if ($dryRun.IsPresent) { - Add-LogMessage -Level Info "Listing users not assigned to any security group from $($config.dc.vmName)..." - $params = @{dryRun = "yes" } -} else { - Add-LogMessage -Level Info "Deleting users not assigned to any security group from $($config.dc.vmName)..." 
- $params = @{dryRun = "no" } -} -$result = Invoke-RemoteScript -Shell "PowerShell" -ScriptPath $script -VMName $config.dc.vmName -ResourceGroupName $config.dc.rg -Parameter $params - -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/administration/SHM_Expand_Allowlist_Dependencies.ps1 b/deployment/administration/SHM_Expand_Allowlist_Dependencies.ps1 deleted file mode 100644 index 577f71bba6..0000000000 --- a/deployment/administration/SHM_Expand_Allowlist_Dependencies.ps1 +++ /dev/null @@ -1,253 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Mirror type to expand (either 'pypi' or 'cran')")] - [ValidateSet("pypi", "cran")] - [string]$Repository, - [Parameter(Mandatory = $true, HelpMessage = "API key for libraries.io")] - [string]$ApiKey, - [Parameter(Mandatory = $false, HelpMessage = "Only consider the most recent NVersions.")] - [int]$NVersions = -1, - [Parameter(Mandatory = $false, HelpMessage = "Timeout in minutes.")] - [int]$TimeoutMinutes = 600, - [Parameter(Mandatory = $false, HelpMessage = "Do not use a cache file.")] - [switch]$NoCache -) - -Import-Module $PSScriptRoot/../common/Logging -Force -ErrorAction Stop - - -# Get normalised name for a package -# --------------------------------- -function Test-PackageExistence { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of package repository")] - [string]$Repository, - [Parameter(Mandatory = $true, HelpMessage = "Name of package to get dependencies for")] - [string]$Package, - [Parameter(Mandatory = $false, HelpMessage = "Repository ID for RStudio package manager")] - [string]$RepositoryId, - [Parameter(Mandatory = $false, HelpMessage = "API key for libraries.io")] - [string]$ApiKey - ) - try { - if ($Repository -eq "pypi") { - # The best PyPI results come from the package JSON files - $response = Invoke-RestMethod -Uri "https://pypi.org/${Repository}/${Package}/json" -MaximumRetryCount 4 -RetryIntervalSec 1 -ErrorAction Stop - if ($response -is [String]) { $response = $response | ConvertFrom-Json -AsHashtable } - $versions = $response.releases | Get-Member -MemberType NoteProperty | ForEach-Object { $_.Name } - $name = $response.info.name - } elseif ($Repository -eq "cran") { - # Use the RStudio package manager for CRAN packages - $response = Invoke-RestMethod -Uri "https://packagemanager.rstudio.com/__api__/repos/${RepositoryId}/packages?name=${Package}&case_insensitive=true" -MaximumRetryCount 4 -RetryIntervalSec 1 -ErrorAction Stop - if ($response -is [String]) { $response = $response | ConvertFrom-Json -AsHashtable } - $name = $response.name - $response = Invoke-RestMethod -Uri "https://packagemanager.rstudio.com/__api__/repos/${RepositoryId}/packages/${name}" -MaximumRetryCount 4 -RetryIntervalSec 1 -ErrorAction Stop - $versions = @($response.version) + ($response.archived | ForEach-Object { $_.version }) - } else { - # For other repositories we use libraries.io - # As we are rate-limited to 60 requests per minute this request can fail. 
If it does, we retry every few seconds for 1 minute - $response = Invoke-RestMethod -Uri "https://libraries.io/api/${Repository}/${Package}?api_key=${ApiKey}" -MaximumRetryCount 16 -RetryIntervalSec 4 -ErrorAction Stop - if ($response -is [String]) { $response = $response | ConvertFrom-Json -AsHashtable } - $versions = $response.versions | ForEach-Object { $_.number } - $name = $response.Name - } - return @{ - versions = ($versions | Sort-Object -Unique) - name = $name - } - } catch [Microsoft.PowerShell.Commands.HttpResponseException] { - Add-LogMessage -Level Error "... $Package could not be found in ${Repository}" - throw $_.Exception # rethrow the original exception - } -} - - -# Get dependencies for all versions of a given package -# ---------------------------------------------------- -function Get-Dependencies { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of package repository")] - [ValidateNotNullOrEmpty()] - [string]$Repository, - [Parameter(Mandatory = $true, HelpMessage = "Name of package to get dependencies for")] - [ValidateNotNullOrEmpty()] - [string]$Package, - [Parameter(Mandatory = $true, HelpMessage = "Versions of package to get dependencies for")] - [ValidateNotNullOrEmpty()] - [string[]]$Versions, - [Parameter(Mandatory = $true, HelpMessage = "Hashtable containing cached dependencies")] - [ValidateNotNullOrEmpty()] - [hashtable]$Cache, - [Parameter(Mandatory = $true, HelpMessage = "Only consider the most recent NVersions. Set to -1 to consider all versions.")] - [ValidateNotNullOrEmpty()] - [int]$NVersions, - [Parameter(Mandatory = $false, HelpMessage = "API key for libraries.io")] - [string]$ApiKey - ) - $dependencies = @() - Add-LogMessage -Level Info "... found $($Versions.Count) versions of $Package" - $MostRecentVersions = ($NVersions -gt 0) ? 
($Versions | Select-Object -Last $NVersions) : $Versions - # If the package is not in the cache or is an invalid/empty object then replace it with a new ordered hashtable - if (($Package -notin $Cache[$Repository].Keys) -or (-not $Cache[$Repository][$Package])) { - $Cache[$Repository][$Package] = [ordered]@{} - } - foreach ($Version in $MostRecentVersions) { - if ($Version -notin $Cache[$Repository][$Package].Keys) { - try { - if ($Repository -eq "pypi") { - # The best PyPI results come from the package JSON files - $response = Invoke-RestMethod -Uri "https://pypi.org/${Repository}/${Package}/${Version}/json" -MaximumRetryCount 4 -RetryIntervalSec 1 -ErrorAction Stop - # Add canonical names to dependencies - $Cache[$Repository][$Package][$Version] = @($response.info.requires_dist | Where-Object { $_ -and ($_ -notmatch "extra ==") } | ForEach-Object { ($_ -split '[;[( ><=!~]')[0].Trim().ToLower() } | Sort-Object -Unique) - } else { - # For other repositories we use libraries.io - try { - # Make an initial attempt without any retries - $response = Invoke-RestMethod -Uri "https://libraries.io/api/${Repository}/${Package}/${Version}/dependencies?api_key=${ApiKey}" -ErrorAction Stop - } catch [Microsoft.PowerShell.Commands.HttpResponseException] { - # If the failure is due to TooManyRequests (429) then retry for 1 minute - if ($_.Exception.Response.StatusCode -eq "TooManyRequests") { - $response = Invoke-RestMethod -Uri "https://libraries.io/api/${Repository}/${Package}/${Version}/dependencies?api_key=${ApiKey}" -MaximumRetryCount 16 -RetryIntervalSec 4 -ErrorAction Stop - } - } - $Cache[$Repository][$Package][$Version] = @($response.dependencies | Where-Object { $_.requirements -ne "extra" } | Where-Object { $_.kind -ne "suggests" } | ForEach-Object { $_.name.Replace(";", "") }) | Sort-Object -Unique - } - } catch { - Add-LogMessage -Level Warning "No dependencies found for ${Package} (${Version}) from ${Repository}!" - $Cache[$Repository][$Package][$Version] = @() - } - } - $dependencies += $Cache[$Repository][$Package][$Version] - } - if (-not $dependencies) { return @() } - return $($dependencies | Where-Object { $_ } | Sort-Object -Unique) -} - - -# Load list of core packages -# -------------------------- -$languageName = @{cran = "r"; pypi = "python" }[$Repository] -$coreAllowlistPath = Join-Path $PSScriptRoot ".." ".." "environment_configs" "package_lists" "allowlist-core-${languageName}-${Repository}-tier3.list" -$extraAllowlistPath = Join-Path $PSScriptRoot ".." ".." "environment_configs" "package_lists" "allowlist-extra-${languageName}-${Repository}-tier3.list" -$fullAllowlistPath = Join-Path $PSScriptRoot ".." ".." "environment_configs" "package_lists" "allowlist-full-${languageName}-${Repository}-tier3.list" -$dependencyCachePath = Join-Path $PSScriptRoot ".." ".." 
"environment_configs" "package_lists" "dependency-cache.json" -$corePackageList = (Get-Content $coreAllowlistPath) + (Get-Content $extraAllowlistPath) | Sort-Object -Unique - -# Initialise the package queue -# ---------------------------- -$queue = New-Object System.Collections.Queue -$corePackageList | ForEach-Object { $queue.Enqueue($_) } -$allDependencies = @() - - -# Load any previously-cached dependencies -# --------------------------------------- -$dependencyCache = [ordered]@{} -if (-not $NoCache) { - if (Test-Path $dependencyCachePath -PathType Leaf) { - $dependencyCache = Get-Content $dependencyCachePath | ConvertFrom-Json -AsHashtable - } - if (-not $dependencyCache) { $dependencyCache = [ordered]@{} } -} -if ($Repository -notin $dependencyCache.Keys) { $dependencyCache[$Repository] = [ordered]@{} } -if ("unavailable_packages" -notin $dependencyCache.Keys) { $dependencyCache["unavailable_packages"] = [ordered]@{} } -if ($Repository -notin $dependencyCache["unavailable_packages"].Keys) { $dependencyCache["unavailable_packages"][$Repository] = @() } - - -# Load RStudio repository ID if relevant -# -------------------------------------- -if ($Repository -eq "cran") { - $response = Invoke-RestMethod -Uri "https://packagemanager.rstudio.com/__api__/repos" -MaximumRetryCount 4 -RetryIntervalSec 1 -ErrorAction Stop - $RepositoryId = $response | Where-Object { $_.name -eq $Repository } | ForEach-Object { $_.id } | Select-Object -First 1 -} else { - $RepositoryId = $null -} - - -# Resolve packages iteratively until the queue is empty -# ----------------------------------------------------- -$packageAllowlist = @() -Add-LogMessage -Level Info "Preparing to expand dependencies for $($queue.Count) package(s) from $Repository" -$LatestTime = (Get-Date) + (New-TimeSpan -Minutes $TimeoutMinutes) -while ($queue.Count) { - try { - $unverifiedName = $queue.Dequeue() - # Ignore this packages if it has already been processed - if ($unverifiedName -in $packageAllowlist) { continue } - # Check that the package exists and add it to the allowlist if so - Add-LogMessage -Level Info "Looking for '${unverifiedName}' in ${Repository}..." - $packageData = Test-PackageExistence -Repository $Repository -Package $unverifiedName -ApiKey $ApiKey -RepositoryId $RepositoryId - if (-not $($packageData.name)) { - Add-LogMessage -Level Error "Package '${unverifiedName}' could not be found!" - continue - } - if ($packageData.name -cne $unverifiedName) { - Add-LogMessage -Level Warning "Package '${unverifiedName}' should be '$($packageData.name)'" - } - $packageAllowlist += @($packageData.name) - # Look for dependencies and add them to the queue - if ($packageData.versions) { - Add-LogMessage -Level Info "... finding dependencies for $($packageData.name)" - $dependencies = Get-Dependencies -Repository $Repository -Package $packageData.name -Versions $packageData.versions -ApiKey $ApiKey -Cache $dependencyCache -NVersions $NVersions - Add-LogMessage -Level Info "... found $($dependencies.Count) dependencies: $dependencies" - $newPackages = $dependencies | Where-Object { $_ -notin $packageAllowlist } | Where-Object { $_ -notin $allDependencies } | Where-Object { $_ -notin $dependencyCache["unavailable_packages"][$Repository] } - $newPackages | ForEach-Object { $queue.Enqueue($_) } - $allDependencies += $dependencies - } else { - Add-LogMessage -Level Warning "... 
could not find any versions of $($packageData.name)" - } - } catch [Microsoft.PowerShell.Commands.HttpResponseException] { - # If this package could not be found then mark it as unavailable - Add-LogMessage -Level Error "... marking '$unverifiedName' as unavailable" - $dependencyCache["unavailable_packages"][$Repository] += @($unverifiedName) | Where-Object { $_ -notin $dependencyCache["unavailable_packages"][$Repository] } - } - Add-LogMessage -Level Info "... there are $($packageAllowlist.Count) package(s) on the allowlist" - Add-LogMessage -Level Info "... there are $($queue.Count) package(s) in the queue" - # Write to the dependency file after each package in case the script terminates early - if (-not $NoCache) { - $dependencyCache | ConvertTo-Json -Depth 99 | Out-File $dependencyCachePath - } - # If we have exceeded the timeout then set the TIMEOUT_REACHED switch and break even if there are packages left in the queue - if ((Get-Date) -ge $LatestTime) { - Add-LogMessage -Level Error "Maximum runtime exceeded with $($queue.Count) package(s) left in the queue!" - Write-Output "TIMEOUT_REACHED=1" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append - break - } -} - -# After processing all packages ensure that the dependencies cache is sorted -Add-LogMessage -Level Info "Sorting dependency cache..." -$sortedDependencies = [ordered]@{} -foreach ($repoName in $($dependencyCache.Keys | Sort-Object)) { - $sortedDependencies[$repoName] = [ordered]@{} - foreach ($pkgName in $($dependencyCache[$repoName].Keys | Sort-Object)) { - $sortedDependencies[$repoName][$pkgName] = [ordered]@{} - foreach ($version in $($dependencyCache[$repoName][$pkgName].Keys | Sort-Object)) { - $sortedDependencies[$repoName][$pkgName][$version] = @($dependencyCache[$repoName][$pkgName][$version] | Sort-Object -Unique) - } - } -} -foreach ($repoName in $($dependencyCache["unavailable_packages"].Keys | Sort-Object)) { - $sortedDependencies["unavailable_packages"][$repoName] = @() - $sortedDependencies["unavailable_packages"][$repoName] += $dependencyCache["unavailable_packages"][$repoName] | Sort-Object -Unique -} -if (-not $NoCache) { - $sortedDependencies | ConvertTo-Json -Depth 99 | Out-File $dependencyCachePath -} - - -# Add a log message for any problematic packages -# ---------------------------------------------- -$unneededCorePackages = $corePackageList | Where-Object { $_ -In $allDependencies } | Sort-Object -Unique -if ($unneededCorePackages) { - Add-LogMessage -Level Info "... found $($unneededCorePackages.Count) explicitly requested package(s) that would have been allowed as dependencies of other packages: $unneededCorePackages" -} -$unavailablePackages = $sortedDependencies["unavailable_packages"][$Repository] -if ($unavailablePackages) { - Add-LogMessage -Level Warning "... ignored $($unavailablePackages.Count) dependencies that could not be found in ${Repository}: $unavailablePackages" -} - - -# Write the full package list to the allowlist -# -------------------------------------------- -Add-LogMessage -Level Info "Writing $($packageAllowlist.Count) package(s) to the allowlist..." -$packageAllowlist | Sort-Object -Unique | Out-File $fullAllowlistPath diff --git a/deployment/administration/SHM_Manage_VMs.ps1 b/deployment/administration/SHM_Manage_VMs.ps1 deleted file mode 100644 index cc9c525418..0000000000 --- a/deployment/administration/SHM_Manage_VMs.ps1 +++ /dev/null @@ -1,93 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 
'project')")] - [string]$shmId, - [Parameter(Mandatory = $true, HelpMessage = "Enter action (EnsureStarted, EnsureStopped)")] - [ValidateSet("EnsureStarted", "EnsureStopped")] - [string]$Action, - [Parameter(Mandatory = $false, HelpMessage = "Enter VM group (Identity, Mirrors or All)")] - [ValidateSet("Identity", "Mirrors", "All")] - [string]$Group = "All", - [Parameter(Mandatory = $false, HelpMessage = "Exclude Firewall (only has an effect if Action is 'EnsureStopped'")] - [switch]$ExcludeFirewall -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module $PSScriptRoot/../common/AzureCompute -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/AzureNetwork -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/Logging -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-ShmConfig -shmId $shmId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.subscriptionName -ErrorAction Stop - -# Get all VMs in matching resource groups -$vmsByRg = Get-VMsByResourceGroupPrefix -ResourceGroupPrefix $config.rgPrefix - -# Remove some VMs from consideration -if ($Group -eq "Identity") { - # Remove Mirror VMs from list - $vmsByRg.Remove($config.mirrors.rg) -} elseif ($Group -eq "Mirrors") { - # Remove Identity VMs from list - $vmsByRg.Remove($config.dc.rg) -} - -switch ($Action) { - "EnsureStarted" { - if (($Group -eq "Identity") -or ($Group -eq "All")) { - # Ensure Firewall is started - $null = Start-Firewall -Name $config.firewall.name -ResourceGroupName $config.network.vnet.rg -VirtualNetworkName $config.network.vnet.name - # Ensure Identity VMs are started before any other VMs - Add-LogMessage -Level Info "Ensuring VMs in resource group '$($config.dc.rg)' are started..." - # Primary DC must be started before Secondary DC - $primaryDCAlreadyRunning = Confirm-VmRunning -Name $config.dc.vmName -ResourceGroupName $config.dc.rg - if ($primaryDCAlreadyRunning) { - Add-LogMessage -Level InfoSuccess "VM '$($config.dc.vmName)' already running." - # Start Secondary DC - Start-VM -Name $config.dcb.vmName -ResourceGroupName $config.dc.rg - } else { - # Stop Secondary DC as it must start after Primary DC - Add-LogMessage -Level Info "Stopping Secondary DC and NPS as Primary DC is not running." - Stop-Vm -Name $config.dcb.vmName -ResourceGroupName $config.dc.rg - # Start Primary DC - Start-VM -Name $config.dc.vmName -ResourceGroupName $config.dc.rg - # Start Secondary DC - Start-VM -Name $config.dcb.vmName -ResourceGroupName $config.dc.rg - } - # Remove Identity VMs from general VM list so they are not processed twice - $vmsByRg.Remove($config.dc.rg) - } - # Process remaining SHM VMs covered by the specified group - foreach ($key in $vmsByRg.Keys) { - $rgVms = $vmsByRg[$key] - $rgName = $rgVms[0].ResourceGroupName - Add-LogMessage -Level Info "Ensuring VMs in resource group '$rgName' are started..." - foreach ($vm in $rgVms) { - Start-VM -VM $vm - } - } - } - "EnsureStopped" { - # Stop VMs - foreach ($key in $vmsByRg.Keys) { - $rgVms = $vmsByRg[$key] - $rgName = $rgVms[0].ResourceGroupName - Add-LogMessage -Level Info "Ensuring VMs in resource group '$rgName' are stopped..." 
- foreach ($vm in $rgVms) { - Stop-VM -VM $vm -NoWait - } - } - if (-not $ExcludeFirewall) { - $null = Stop-Firewall -Name $config.firewall.name -ResourceGroupName $config.network.vnet.rg -NoWait - } - } -} - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/administration/SHM_Minimise_VM_Sizes.ps1 b/deployment/administration/SHM_Minimise_VM_Sizes.ps1 deleted file mode 100644 index 92a4f54796..0000000000 --- a/deployment/administration/SHM_Minimise_VM_Sizes.ps1 +++ /dev/null @@ -1,59 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(HelpMessage = "Enter VM Size for all VMs")] - [ValidateSet("Tiny", "Small")] - [string]$Size = "Small" -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.Compute -ErrorAction Stop -Import-Module $PSScriptRoot/../common/AzureCompute -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/Logging -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-ShmConfig -shmId $shmId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.subscriptionName -ErrorAction Stop - -# Set VM size -if ($Size -eq "Tiny") { - $vmSize = "Standard_B2ms" -} elseif ($Size -eq "Small") { - $vmSize = "Standard_D2_v3" -} - -# Get all VMs in matching resource groups -$vmsByRg = Get-VMsByResourceGroupPrefix -ResourceGroupPrefix $config.rgPrefix - -foreach ($key in $vmsByRg.Keys) { - $rgVms = $vmsByRg[$key] - $rgName = $rgVms[0].ResourceGroupName - Add-LogMessage -Level Info "Ensuring VMs in resource group '$rgName' are resized to '$vmSize'..." - foreach ($vm in $rgVms) { - if ($vm.HardwareProfile.VmSize -eq $vmSize) { - Add-LogMessage -Level InfoSuccess "VM '$($VM.Name)' is already size '$vmSize'." - } else { - $vmStatuses = (Get-AzVM -Name $vm.Name -ResourceGroupName $vm.ResourceGroupName -Status).Statuses.Code - if ($vmStatuses -contains "ProvisioningState/succeeded") { - $vm.HardwareProfile.VmSize = $vmSize - $result = Update-AzVM -VM $vm -ResourceGroupName $vm.ResourceGroupName -NoWait - if ($result.IsSuccessStatusCode) { - Add-LogMessage -Level Success "Resize request to '$vmSize' accepted for VM '$($vm.Name)'.'" - } else { - Add-LogMessage -Level Fatal "Unexpected status '$($result.StatusCode) ($($result.ReasonPhrase))' encountered when requesting resize of VM '$($vm.Name)' to '$vmSize').'" - } - } else { - Add-LogMessage -Level Warning "VM '$($vm.Name)' not in supported status: $vmStatus. No action taken." - } - } - } -} - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/administration/SHM_Package_Repository_Teardown.ps1 b/deployment/administration/SHM_Package_Repository_Teardown.ps1 deleted file mode 100644 index c787aa7276..0000000000 --- a/deployment/administration/SHM_Package_Repository_Teardown.ps1 +++ /dev/null @@ -1,41 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 
'project')")] - [string]$shmId, - [Parameter(Mandatory = $true, HelpMessage = "Which tier of mirrors should be torn down")] - [ValidateSet("2", "3")] - [string]$tier -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.Compute -ErrorAction Stop -Import-Module Az.Resources -ErrorAction Stop -Import-Module $PSScriptRoot/../common/AzureCompute -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/AzureNetwork -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/Logging -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-ShmConfig -shmId $shmId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.subscriptionName -ErrorAction Stop - -# Check if Resource Group exists -$null = Get-AzResourceGroup -Name $config.repositories.rg -Location $config.location -ErrorVariable notExists -ErrorAction SilentlyContinue -if ($notExists) { - Add-LogMessage -Level InfoSuccess "Resource group '$($config.repositories.rg)' does not exist" -} else { - # Tear down repository VMs and associated disks/network cards - Get-AzVM -ResourceGroupName $config.repositories.rg | Where-Object { $_.Name -like "*-TIER-${tier}" } | ForEach-Object { - Remove-VirtualMachine -Name $_.Name -ResourceGroupName $_.ResourceGroupName -Force - Remove-ManagedDisk -Name "$($_.Name)-OS-DISK" -ResourceGroupName $config.repositories.rg - Remove-ManagedDisk -Name "$($_.Name)-DATA-DISK" -ResourceGroupName $config.repositories.rg - Remove-NetworkInterface -Name "$($_.Name)-NIC" -ResourceGroupName $config.repositories.rg - } -} - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/administration/SHM_Package_Repository_Update_Allowlists.ps1 b/deployment/administration/SHM_Package_Repository_Update_Allowlists.ps1 deleted file mode 100644 index 3a9d50e1cd..0000000000 --- a/deployment/administration/SHM_Package_Repository_Update_Allowlists.ps1 +++ /dev/null @@ -1,117 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $false, HelpMessage = "Path to directory containing allowlist files (default: '/environment_configs/package_lists')")] - [string]$AllowlistDirectory = $null -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module $PSScriptRoot/../common/AzureCompute -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/Logging -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-ShmConfig -shmId $shmId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.subscriptionName -ErrorAction Stop - - -# Common variable names -# --------------------- -if (-not $AllowlistDirectory) { $AllowlistDirectory = Join-Path $PSScriptRoot ".." ".." 
"environment_configs" "package_lists" } -$SourceRepositoryNames = @("pypi", "cran") -$tier = "3" # currently only Tier-3 mirrors have allowlists -$AllowList = @{} - - -# Load package lists into arrays -# ------------------------------ -foreach ($SourceRepositoryName in $SourceRepositoryNames) { - try { - $AllowListRepositoryName = "$SourceRepositoryName".ToLower().Replace("cran", "r-cran").Replace("pypi", "python-pypi") - $AllowListPath = Join-Path $AllowlistDirectory "allowlist-full-${AllowListRepositoryName}-tier${tier}.list".ToLower() -Resolve -ErrorAction Stop - $AllowList[$SourceRepositoryName] = (Get-Content $AllowListPath -Raw -ErrorAction Stop) -split "`n" | Where-Object { $_ -and (-not $_.StartsWith("#")) } # remove empty lines and commented packages - Add-LogMessage -Level Info "Loaded allowlist from '$AllowListPath'" - } catch { - $AllowList[$SourceRepositoryName] = @() - Add-LogMessage -Level Error "Could not find allowlist at '$AllowListPath'" - } -} - - -# If we are using proxies then construct script to -# -# 1. Write PyPI allowlist -# 2. Write CRAN allowlist -# 3. Run single job to update all Nexus repositories -if ($config.repositories["tier${tier}"].proxies) { - # Construct single update script - $script = "#! /bin/bash`n" - foreach ($SourceRepositoryName in $SourceRepositoryNames) { - # Empty existing allowlist - $script += ": > /etc/nexus/allowlist-${SourceRepositoryName}`n" # ':' is the shell no-op command - # Write items from local allowlist to remote - $script += "cat << EOF > /etc/nexus/allowlist-${SourceRepositoryName}`n" - foreach ($package in $AllowList[$SourceRepositoryName]) { - $script += "${package}`n" - } - $script += "EOF`n" - $script += "echo `"There are `$(wc -l /etc/nexus/allowlist-${SourceRepositoryName} | cut -d' ' -f1)`" packages on the ${SourceRepositoryName} allowlist`n" - } - $script += "/usr/local/update-nexus-allowlists`n" - # Update the allowlists on the proxy VM - try { - $vmName = $config.repositories["tier${tier}"].proxies.many.vmName - Add-LogMessage -Level Info "Updating allowlists on $vmName..." - # Ensure the VM is running - $null = Start-VM -Name $vmName -ResourceGroupName $config.repositories.rg - # Run the script on the Nexus VM - $null = Invoke-RemoteScript -VMName $vmName -ResourceGroupName $config.repositories.rg -Shell "UnixShell" -Script $script - } catch { - Add-LogMessage -Level Error "Could not update allowlists for VM '$vmName'. Is it deployed and running?" - } -} - -# If we are using mirrors then construct one script for each repository type to -# -# 1. Write allowlist -# 2. Run job to update mirror -if ($config.repositories["tier${tier}"].mirrorsExternal) { - foreach ($SourceRepositoryName in $SourceRepositoryNames) { - # Construct repository update script - $script = "#! 
/bin/bash`n" - # Empty existing allowlist - $script += ": > /home/mirrordaemon/package_allowlist.txt`n" # ':' is the shell no-op command - # Write items from local allowlist to remote - $script += "cat << EOF > /home/mirrordaemon/package_allowlist.txt`n" - foreach ($package in $AllowList[$SourceRepositoryName]) { - $script += "${package}`n" - } - $script += "EOF`n" - $script += "echo `"There are `$(wc -l /home/mirrordaemon/package_allowlist.txt | cut -d' ' -f1)`" packages on the allowlist`n" - # PyPI also needs us to run the script which updates /etc/bandersnatch.conf - if ($SourceRepositoryName.ToLower() -eq "pypi") { - $script += "python3 /home/mirrordaemon/update_bandersnatch_config.py`n" - } - # Update the allowlists on the mirror VM - try { - $vmName = $config.repositories["tier${tier}"].mirrorsExternal[$SourceRepositoryName].vmName - Add-LogMessage -Level Info "Updating allowlists on $vmName..." - # Ensure the VM is running - $null = Start-VM -Name $vmName -ResourceGroupName $config.repositories.rg - # Run the script on the mirror VM - $null = Invoke-RemoteScript -VMName $vmName -ResourceGroupName $config.repositories.rg -Shell "UnixShell" -Script $script - # Restart the mirror to trigger a pull-then-push - Start-VM -Name $vmName -ResourceGroupName $config.repositories.rg -ForceRestart - } catch { - Add-LogMessage -Level Error "Could not update allowlists for VM '$vmName'. Is it deployed and running?" - } - } -} - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/administration/SHM_Teardown.ps1 b/deployment/administration/SHM_Teardown.ps1 deleted file mode 100644 index b10c5e7485..0000000000 --- a/deployment/administration/SHM_Teardown.ps1 +++ /dev/null @@ -1,83 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $false, HelpMessage = "No-op mode which will not remove anything")] - [Switch]$dryRun -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module $PSScriptRoot/../common/AzureDns -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/AzureResources -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/Cryptography -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/DataStructures -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/Logging -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-ShmConfig -shmId $shmId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.subscriptionName -ErrorAction Stop - - -# Make user confirm before beginning deletion -# ------------------------------------------- -$ResourceGroups = Get-ShmResourceGroups -shmConfig $config -if ($dryRun.IsPresent) { - Add-LogMessage -Level Warning "This would remove $($ResourceGroups.Count) resource group(s) belonging to SHM '$($config.id)' from '$($config.subscriptionName)'!" -} else { - Add-LogMessage -Level Warning "This will remove $($ResourceGroups.Count) resource group(s) belonging to SHM '$($config.id)' from '$($config.subscriptionName)'!" - $ResourceGroups | ForEach-Object { Add-LogMessage -Level Warning "... $($_.ResourceGroupName)" } - $confirmation = Read-Host "Are you sure you want to proceed? 
[y/n]" - while ($confirmation -ne "y") { - if ($confirmation -eq "n") { exit 0 } - $confirmation = Read-Host "Are you sure you want to proceed? [y/n]" - } -} - - -# Remove SHM resource groups and the resources they contain -# --------------------------------------------------------- -if ($ResourceGroups.Count) { - $ResourceGroupNames = $ResourceGroups | ForEach-Object { $_.ResourceGroupName } - if ($dryRun.IsPresent) { - $ResourceGroupNames | ForEach-Object { - Add-LogMessage -Level Info "Skipping removal of resource group '$_' with its contents." - } - } else { - Remove-AllResourceGroups -ResourceGroupNames $ResourceGroupNames -MaxAttempts 60 - } -} - - -# Warn if any resources or groups remain -# -------------------------------------- -$ResourceGroups = $dryRun.IsPresent ? $null : (Get-ShmResourceGroups -shmConfig $config) -if ($ResourceGroups) { - Add-LogMessage -Level Error "There are still $($ResourceGroups.Length) undeleted resource group(s) remaining!" - foreach ($ResourceGroup in $ResourceGroups) { - Add-LogMessage -Level Error "$($ResourceGroup.ResourceGroupName)" - Get-ResourcesInGroup -ResourceGroupName $ResourceGroup.ResourceGroupName | ForEach-Object { - Add-LogMessage -Level Error "... $($_.Name) [$($_.ResourceType)]" - } - } - Add-LogMessage -Level Fatal "Failed to teardown SHM '$($config.id)'!" -} - - -# Remove DNS data from the DNS subscription -# ----------------------------------------- -if ($dryRun.IsPresent) { - Add-LogMessage -Level Info "Would remove '@' TXT record from SHM '$($config.id)' DNS zone $($config.domain.fqdn)" -} else { - Remove-DnsRecord -RecordName "@" ` - -RecordType "TXT" ` - -ResourceGroupName $config.dns.rg ` - -SubscriptionName $config.dns.subscriptionName ` - -ZoneName $config.domain.fqdn -} - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/administration/SRE_Enroll_Disk_Backup.ps1 b/deployment/administration/SRE_Enroll_Disk_Backup.ps1 deleted file mode 100644 index 6954306883..0000000000 --- a/deployment/administration/SRE_Enroll_Disk_Backup.ps1 +++ /dev/null @@ -1,35 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $true, HelpMessage = "Enter SRE ID (e.g. 
'sandbox')")] - [string]$sreId, - [Parameter(Mandatory = $true, HelpMessage = "Enter the name of the resource group containing the new disk")] - [string]$resourceGroup, - [Parameter(Mandatory = $true, HelpMessage = "Enter the name of the new disk")] - [string]$diskName -) - -Import-Module $PSScriptRoot/../common/AzureDataProtection -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/Configuration -Force -ErrorAction Stop - -$config = Get-SreConfig -shmId $shmId -sreId $sreId -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - -# Get backup vault -$Vault = Get-AzDataProtectionBackupVault -ResourceGroupName $config.sre.backup.rg ` - -VaultName $config.sre.backup.vault.name ` - -# Get disk backup policy -$Policy = Get-AzDataProtectionBackupPolicy -Name $config.sre.backup.disk.policy_name ` - -ResourceGroupName $config.sre.backup.rg ` - -VaultName $Vault.Name - -# Create backup instance for named disk -$Disk = Get-AzDisk -ResourceGroupName $resourceGroup -DiskName $diskName -$null = Deploy-DataProtectionBackupInstance -BackupPolicyId $Policy.Id ` - -ResourceGroupName $config.sre.backup.rg ` - -VaultName $Vault.Name ` - -DataSourceType 'disk' ` - -DataSourceId $Disk.Id ` - -DataSourceLocation $Disk.Location ` - -DataSourceName $Disk.Name diff --git a/deployment/administration/SRE_Manage_VMs.ps1 b/deployment/administration/SRE_Manage_VMs.ps1 deleted file mode 100644 index aca6d79f83..0000000000 --- a/deployment/administration/SRE_Manage_VMs.ps1 +++ /dev/null @@ -1,62 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $true, HelpMessage = "Enter SRE ID (e.g. 'sandbox')")] - [string]$sreId, - [Parameter(Mandatory = $true, HelpMessage = "Enter action (Start, Shutdown or Restart)")] - [ValidateSet("EnsureStarted", "EnsureStopped")] - [string]$Action -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module $PSScriptRoot/../common/AzureCompute -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/AzureNetwork -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/Logging -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-SreConfig -shmId $shmId -sreId $sreId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - -# Get all VMs in matching resource groups -$vmsByRg = Get-VMsByResourceGroupPrefix -ResourceGroupPrefix $config.sre.rgPrefix - -switch ($Action) { - "EnsureStarted" { - # Remove remote desktop VMs to process last - # May be able to simplify this further now that MSRDS is removed - $remoteDesktopVms = $vmsByRg[$config.sre.remoteDesktop.rg] - $vmsByRg.Remove($config.sre.remoteDesktop.rg) - # Start all other VMs before RDS VMs so all services will be available when users can login via RDS - foreach ($key in $vmsByRg.Keys) { - $rgVms = $vmsByRg[$key] - $rgName = $rgVms[0].ResourceGroupName - Add-LogMessage -Level Info "Ensuring VMs in resource group '$rgName' are started..." - foreach ($vm in $rgVms) { - Start-VM -VM $vm - } - } - # Ensure remote desktop VMs are started - Add-LogMessage -Level Info "Ensuring VMs in resource group '$($config.sre.remoteDesktop.rg)' are started..." 
- # Start Guacamole VMs - $remoteDesktopVms | ForEach-Object { Start-VM -VM $_ } - } - "EnsureStopped" { - foreach ($key in $vmsByRg.Keys) { - $rgVms = $vmsByRg[$key] - $rgName = $rgVms[0].ResourceGroupName - Add-LogMessage -Level Info "Ensuring VMs in resource group '$rgName' are stopped..." - foreach ($vm in $rgVms) { - Stop-VM -VM $vm -NoWait - } - } - } -} - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/administration/SRE_Minimise_VM_Sizes.ps1 b/deployment/administration/SRE_Minimise_VM_Sizes.ps1 deleted file mode 100644 index 99a7c1d926..0000000000 --- a/deployment/administration/SRE_Minimise_VM_Sizes.ps1 +++ /dev/null @@ -1,61 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $true, HelpMessage = "Enter SRE ID (e.g. 'sandbox')")] - [string]$sreId, - [Parameter(HelpMessage = "Enter VM Size for all VMs")] - [ValidateSet("Tiny", "Small")] - [string]$Size = "Small" -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.Compute -ErrorAction Stop -Import-Module $PSScriptRoot/../common/AzureCompute -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/Logging -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-SreConfig -shmId $shmId -sreId $sreId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - -# Set VM size -if ($Size -eq "Tiny") { - $vmSize = "Standard_B2ms" -} elseif ($Size -eq "Small") { - $vmSize = "Standard_D2_v3" -} - -# Get all VMs in matching resource groups -$vmsByRg = Get-VMsByResourceGroupPrefix -ResourceGroupPrefix $config.sre.rgPrefix - -foreach ($key in $vmsByRg.Keys) { - $rgVms = $vmsByRg[$key] - $rgName = $rgVms[0].ResourceGroupName - Add-LogMessage -Level Info "Ensuring VMs in resource group '$rgName' are resized to '$vmSize'..." - foreach ($vm in $rgVms) { - if ($vm.HardwareProfile.VmSize -eq $vmSize) { - Add-LogMessage -Level InfoSuccess "VM '$($VM.Name)' is already size '$vmSize'." - } else { - $vmStatuses = (Get-AzVM -Name $vm.Name -ResourceGroupName $vm.ResourceGroupName -Status).Statuses.Code - if ($vmStatuses -contains "ProvisioningState/succeeded") { - $vm.HardwareProfile.VmSize = $vmSize - $result = Update-AzVM -VM $vm -ResourceGroupName $vm.ResourceGroupName -NoWait - if ($result.IsSuccessStatusCode) { - Add-LogMessage -Level Success "Resize request to '$vmSize' accepted for VM '$($vm.Name)'." - } else { - Add-LogMessage -Level Fatal "Unexpected status '$($result.StatusCode) ($($result.ReasonPhrase))' encountered when requesting resize of VM '$($vm.Name)' to '$vmSize'." - } - } else { - Add-LogMessage -Level Warning "VM '$($vm.Name)' not in supported status: $vmStatuses. No action taken."
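# A hedged aside on resizing: an in-place resize only succeeds if the target size is
# offered on the VM's current hardware cluster (deallocating first widens the choice).
# A minimal sketch, with hypothetical names, of checking the available sizes via Get-AzVMSize:
$availableSizes = Get-AzVMSize -ResourceGroupName "EXAMPLE-RG" -VMName "EXAMPLE-VM" | Select-Object -ExpandProperty Name
if ($availableSizes -notcontains "Standard_D2_v3") {
    Write-Warning "Standard_D2_v3 is not currently available for this VM; consider deallocating it before resizing."
}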
- } - } - } -} - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/administration/SRE_SRD_Remote_Diagnostics.ps1 b/deployment/administration/SRE_SRD_Remote_Diagnostics.ps1 deleted file mode 100644 index 9a72120f91..0000000000 --- a/deployment/administration/SRE_SRD_Remote_Diagnostics.ps1 +++ /dev/null @@ -1,73 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $true, HelpMessage = "Enter SRE ID (e.g. 'sandbox')")] - [string]$sreId, - [Parameter(Mandatory = $true, HelpMessage = "Enter last octet of SRD IP address (e.g. 160)")] - [string]$ipLastOctet -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.Compute -ErrorAction Stop -Import-Module Az.Network -ErrorAction Stop -Import-Module $PSScriptRoot/../common/AzureKeyVault -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/Cryptography -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/Logging -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/RemoteCommands -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-SreConfig -shmId $shmId -sreId $sreId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - - -# Find VM with private IP address matching the provided last octet -# ---------------------------------------------------------------- -Add-LogMessage -Level Info "Finding SRD with last IP octet: $ipLastOctet" -$vmId = Get-AzNetworkInterface -ResourceGroupName $config.sre.srd.rg | Where-Object { ($_.IpConfigurations.PrivateIpAddress).Split(".") -eq $ipLastOctet } | ForEach-Object { $_.VirtualMachine.Id } -$vmIpAddress = (Get-AzNetworkInterface | Where-Object { $_.VirtualMachine.Id -eq $vmId }).IpConfigurations.PrivateIpAddress -$vm = Get-AzVM -ResourceGroupName $config.sre.srd.rg | Where-Object { $_.Id -eq $vmId } -if ($?) { - Add-LogMessage -Level Success "Found SRD '$($vm.Name)'" -} else { - Add-LogMessage -Level Fatal "Could not find VM with last IP octet '$ipLastOctet'" -} - - -# Run remote diagnostic scripts -# ----------------------------- -& $(Join-Path $PSScriptRoot '..' 
'secure_research_environment' 'setup' 'Run_SRE_SRD_Remote_Diagnostics.ps1') -shmId $shmId -sreId $sreId -ipLastOctet $ipLastOctet - - -# Get LDAP secret from the Key Vault -# ---------------------------------- -Add-LogMessage -Level Info "[ ] Loading LDAP secret from Key Vault '$($config.sre.keyVault.name)'" -$ldapSearchPassword = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $config.sre.users.serviceAccounts.ldapSearch.passwordSecretName -DefaultLength 20 -AsPlaintext -if ($ldapSearchPassword) { - Add-LogMessage -Level Success "Found LDAP secret in the Key Vault" -} else { - Add-LogMessage -Level Fatal "Could not load LDAP secret from Key Vault '$($config.sre.keyVault.name)'" -} - - -# Update LDAP secret on the SRD -# ----------------------------- -Update-VMLdapSecret -Name $vm.Name -ResourceGroupName $config.sre.srd.rg -LdapSearchPassword $ldapSearchPassword - - -# Update LDAP secret in local Active Directory on the SHM DC -# ---------------------------------------------------------- -Update-AdLdapSecret -Name $config.shm.dc.vmName -ResourceGroupName $config.shm.dc.rg -SubscriptionName $config.shm.subscriptionName -LdapSearchPassword $ldapSearchPassword -LdapSearchSamAccountName $config.sre.users.serviceAccounts.ldapSearch.samAccountName - - -# Update DNS record on the SHM for this VM -# ---------------------------------------- -Update-VMDnsRecords -DcName $config.shm.dc.vmName -DcResourceGroupName $config.shm.dc.rg -BaseFqdn $config.shm.domain.fqdn -ShmSubscriptionName $config.shm.subscriptionName -VmHostname $vm.Name -VmIpAddress $vmIpAddress - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/administration/SRE_Teardown.ps1 b/deployment/administration/SRE_Teardown.ps1 deleted file mode 100644 index aa62102510..0000000000 --- a/deployment/administration/SRE_Teardown.ps1 +++ /dev/null @@ -1,125 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $true, HelpMessage = "Enter SRE ID (e.g. 'sandbox')")] - [string]$sreId, - [Parameter(Mandatory = $false, HelpMessage = "No-op mode which will not remove anything")] - [Switch]$dryRun -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.Automation -ErrorAction Stop -Import-Module $PSScriptRoot/../common/AzureDataProtection -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/AzureResources -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/DataStructures -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../common/Logging -Force -ErrorAction Stop - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-SreConfig -shmId $shmId -sreId $sreId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - - -# Make user confirm before beginning deletion -# ------------------------------------------- -$ResourceGroups = Get-SreResourceGroups -sreConfig $config -if ($dryRun.IsPresent) { - Add-LogMessage -Level Warning "This would remove $($ResourceGroups.Count) resource group(s) belonging to SRE '$($config.sre.id)' from '$($config.sre.subscriptionName)'!" 
-} else { - Add-LogMessage -Level Warning "This will remove $($ResourceGroups.Count) resource group(s) belonging to SRE '$($config.sre.id)' from '$($config.sre.subscriptionName)'!" - $ResourceGroups | ForEach-Object { Add-LogMessage -Level Warning "... $($_.ResourceGroupName)" } - $confirmation = Read-Host "Are you sure you want to proceed? [y/n]" - while ($confirmation -ne "y") { - if ($confirmation -eq "n") { exit 0 } - $confirmation = Read-Host "Are you sure you want to proceed? [y/n]" - } -} - -# Remove backup instances and policies. Without this the backup vault cannot be deleted -# ------------------------------------------------------------------------------------- -if ($dryRun.IsPresent) { - Add-LogMessage -Level Info "Backup instances from $($config.sre.backup.vault.name) would be deleted" - Add-LogMessage -Level Info "Disk snapshots from from $($config.sre.backup.rg) would be deleted" -} else { - Remove-DataProtectionBackupInstances -ResourceGroupName $config.sre.backup.rg -VaultName $config.sre.backup.vault.name - Remove-DataProtectionBackupDiskSnapshots -ResourceGroupName $config.sre.backup.rg -} - - -# Remove SRE resource groups and the resources they contain -# --------------------------------------------------------- -if ($ResourceGroups.Count) { - $ResourceGroupNames = $ResourceGroups | ForEach-Object { $_.ResourceGroupName } - if ($dryRun.IsPresent) { - $ResourceGroupNames | ForEach-Object { - Add-LogMessage -Level Info "Skipping removal of resource group '$_' with its contents." - } - } else { - Remove-AllResourceGroups -ResourceGroupNames $ResourceGroupNames -MaxAttempts 60 - } -} - - -# Warn if any resources or groups remain -# -------------------------------------- -$ResourceGroups = $dryRun.IsPresent ? $null : (Get-SreResourceGroups -sreConfig $config) -if ($ResourceGroups) { - Add-LogMessage -Level Error "There are still $($ResourceGroups.Count) undeleted resource group(s) remaining!" - foreach ($ResourceGroup in $ResourceGroups) { - Add-LogMessage -Level Error "$($ResourceGroup.ResourceGroupName)" - Get-ResourcesInGroup -ResourceGroupName $ResourceGroup.ResourceGroupName | ForEach-Object { - Add-LogMessage -Level Error "... $($_.Name) [$($_.ResourceType)]" - } - } - Add-LogMessage -Level Fatal "Failed to teardown SRE '$($config.sre.id)'!" -} - - -# Remove residual SRE data from the SHM -# ------------------------------------- -$scriptPath = Join-Path $PSScriptRoot ".." "secure_research_environment" "setup" "Remove_SRE_Data_From_SHM.ps1" -if ($dryRun.IsPresent) { - Add-LogMessage -Level Info "SRE data would be removed from the SHM by running: $scriptPath -shmId $shmId -sreId $sreId" -} else { - & $scriptPath -shmId $shmId -sreId $sreId -} - - -# Remove update configuration from the SHM automation account -# ----------------------------------------------------------- -try { - Add-LogMessage -Level Info "Removing update automation for SRE $sreId..." 
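# A small portability note on the '? :' ternary used a few lines above
# ($ResourceGroups = $dryRun.IsPresent ? $null : ...): that syntax requires PowerShell 7
# or later. An equivalent that also runs on Windows PowerShell 5.1 would be:
if ($dryRun.IsPresent) {
    $ResourceGroups = $null
} else {
    $ResourceGroups = Get-SreResourceGroups -sreConfig $config
}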
- $null = Remove-AzAutomationSoftwareUpdateConfiguration -Name "sre-$($config.sre.id.ToLower())-windows" -AutomationAccountName $config.shm.monitoring.automationAccount.name -ResourceGroupName $config.shm.monitoring.rg -ErrorAction Stop - $null = Remove-AzAutomationSoftwareUpdateConfiguration -Name "sre-$($config.sre.id.ToLower())-linux" -AutomationAccountName $config.shm.monitoring.automationAccount.name -ResourceGroupName $config.shm.monitoring.rg -ErrorAction Stop - Add-LogMessage -Level Success "Removed update automation for SRE $sreId" -} catch { - Add-LogMessage -Level Failure "Failed to remove update automation for SRE $sreId!" -Exception $_.Exception -} - - -# Tear down the AzureAD application -# --------------------------------- -if ($config.sre.remoteDesktop.provider -eq "ApacheGuacamole") { - $AzureAdApplicationName = "Guacamole SRE $($config.sre.id)" - if ($dryRun.IsPresent) { - Add-LogMessage -Level Info "'$AzureAdApplicationName' would be removed from Azure Active Directory..." - } else { - Add-LogMessage -Level Info "Ensuring that '$AzureAdApplicationName' is removed from Azure Active Directory..." - if (-not (Get-MgContext)) { - Connect-MgGraph -TenantId $config.shm.azureAdTenantId -Scopes "Application.ReadWrite.All", "Policy.ReadWrite.ApplicationConfiguration" -ErrorAction Stop -ContextScope Process - } - try { - Get-MgApplication -Filter "DisplayName eq '$AzureAdApplicationName'" | ForEach-Object { Remove-MgApplication -ApplicationId $_.Id } - Add-LogMessage -Level Success "'$AzureAdApplicationName' has been removed from Azure Active Directory" - } catch { - Add-LogMessage -Level Fatal "Could not remove '$AzureAdApplicationName' from Azure Active Directory!" -Exception $_.Exception - } - } -} - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/administration/remote/Delete_Unassigned_Users.ps1 b/deployment/administration/remote/Delete_Unassigned_Users.ps1 deleted file mode 100644 index d514d01439..0000000000 --- a/deployment/administration/remote/Delete_Unassigned_Users.ps1 +++ /dev/null @@ -1,33 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "yes/no determines whether users should actually be deleted")] - [string]$dryRun -) - -# Extract list of users -$userOuPath = (Get-ADObject -Filter * | Where-Object { $_.Name -eq "Safe Haven Research Users" }).DistinguishedName -$users = Get-ADUser -Filter * -SearchBase "$userOuPath" -Properties * -foreach ($user in $users) { - $groupName = ($user | Select-Object -ExpandProperty MemberOf | ForEach-Object { (($_ -Split ",")[0] -Split "=")[1] }) -join "|" - if (!($groupName)) { - $name = $user.SamAccountName - if ($dryRun -eq "yes") { - Write-Output "User $name would be deleted by this action" - } else { - Write-Output "Deleting $name" - Remove-ADUser -Identity $name -Confirm:$false - } - } -} - -# Force sync with AzureAD. It will still take around 5 minutes for changes to propagate -if ($dryRun -eq "no") { - Write-Output "Synchronising locally Active Directory with Azure" - try { - Import-Module -Name "C:\Program Files\Microsoft Azure AD Sync\Bin\ADSync" -ErrorAction Stop - Start-ADSyncSyncCycle -PolicyType Delta - } catch [System.IO.FileNotFoundException] { - Write-Output "Skipping as Azure AD Sync is not installed" - } catch { - Write-Output "Unable to run Azure Active Directory synchronisation!" 
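# An optional refinement (sketch only, not the script's original form) for the user loop
# in Delete_Unassigned_Users.ps1 above: only MemberOf is consumed, so the query could
# request just that attribute rather than '-Properties *', which fetches every attribute
# for every user in the OU:
$users = Get-ADUser -Filter * -SearchBase $userOuPath -Properties MemberOf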
- } -} diff --git a/deployment/common/AzureAutomation.psm1 b/deployment/common/AzureAutomation.psm1 deleted file mode 100644 index 660f3cc63d..0000000000 --- a/deployment/common/AzureAutomation.psm1 +++ /dev/null @@ -1,224 +0,0 @@ -Import-Module Az.Automation -ErrorAction Stop -Import-Module Az.MonitoringSolutions -ErrorAction Stop -Import-Module $PSScriptRoot/Logging -ErrorAction Stop - - -# Connect an automation account to a log analytics workspace -# ---------------------------------------------------------- -function Connect-AutomationAccountLogAnalytics { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of automation account to connect")] - [string]$AutomationAccountName, - [Parameter(Mandatory = $true, HelpMessage = "Log analytics workspace to connect")] - [Microsoft.Azure.Management.Internal.Network.Common.IOperationalInsightWorkspace]$LogAnalyticsWorkspace - ) - $accountResourceId = (Get-AzResource | Where-Object { $_.Name -eq $AutomationAccountName } | Select-Object -First 1).ResourceId - $linked = Get-AzOperationalInsightsLinkedService -ResourceGroupName $LogAnalyticsWorkspace.ResourceGroupName -WorkspaceName $LogAnalyticsWorkspace.Name | Where-Object { ($_.Name -eq "Automation") -and ($_.ResourceId -eq $accountResourceId) } - if (-not $linked) { - $null = Set-AzOperationalInsightsLinkedService -LinkedServiceName "Automation" ` - -ResourceGroupName $LogAnalyticsWorkspace.ResourceGroupName ` - -WorkspaceName $LogAnalyticsWorkspace.Name ` - -WriteAccessResourceId "$accountResourceId" - if ($?) { - Add-LogMessage -Level Success "Linked automation account '$AutomationAccountName' to log analytics workspace '$($LogAnalyticsWorkspace.Name)'." - } else { - Add-LogMessage -Level Fatal "Failed to link automation account '$AutomationAccountName' to log analytics workspace '$($LogAnalyticsWorkspace.Name)'!" - } - } else { - Add-LogMessage -Level Info "Automation account '$AutomationAccountName' is already linked to log analytics workspace '$($LogAnalyticsWorkspace.Name)'." - } -} -Export-ModuleMember -Function Connect-AutomationAccountLogAnalytics - - -# Create automation account if it does not exist -# ---------------------------------------------- -function Deploy-AutomationAccount { - param( - [Parameter(Mandatory = $true, HelpMessage = "Location of automation account to deploy")] - [string]$Location, - [Parameter(Mandatory = $true, HelpMessage = "Name of automation account to deploy")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to deploy into")] - [string]$ResourceGroupName - ) - Add-LogMessage -Level Info "Ensuring that automation account '$Name' exists..." - try { - $automationAccount = Get-AzAutomationAccount -Name $Name -ResourceGroupName $ResourceGroupName -ErrorAction Stop - Add-LogMessage -Level InfoSuccess "Automation account '$Name' already exists" - } catch { - Add-LogMessage -Level Info "[ ] Creating automation account '$Name'" - $automationAccount = New-AzAutomationAccount -Name $Name -ResourceGroupName $ResourceGroupName -Location $Location -Plan "Free" - if ($?) { - Add-LogMessage -Level Success "Created automation account '$Name'" - } else { - Add-LogMessage -Level Fatal "Failed to create automation account '$Name'!" 
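# A hypothetical usage sketch for the Deploy-AutomationAccount helper in this module;
# the module path, account name, resource group and location are illustrative only and
# not taken from any real SHM configuration:
Import-Module ./deployment/common/AzureAutomation -Force -ErrorAction Stop
$account = Deploy-AutomationAccount -Name "shm-example-automation" `
                                    -ResourceGroupName "RG_SHM_EXAMPLE_MONITORING" `
                                    -Location "uksouth"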
- } - } - return $automationAccount -} -Export-ModuleMember -Function Deploy-AutomationAccount - - -# Create automation query if it does not exist -# -------------------------------------------- -function Deploy-AutomationAzureQuery { - param( - [Parameter(Mandatory = $true, HelpMessage = "Automation account to deploy the schedule into")] - [Microsoft.Azure.Commands.Automation.Model.AutomationAccount]$Account, - [Parameter(Mandatory = $true, HelpMessage = "Resource groups covered by the query")] - [Microsoft.Azure.Commands.ResourceManager.Cmdlets.SdkModels.PSResourceGroup[]]$ResourceGroups - ) - try { - return New-AzAutomationUpdateManagementAzureQuery -AutomationAccountName $Account.AutomationAccountName ` - -Location $Account.location ` - -ResourceGroupName $Account.ResourceGroupName ` - -Scope @($ResourceGroups | ForEach-Object { $_.ResourceId } ) ` - -ErrorAction Stop - } catch { - Add-LogMessage -Level Fatal "Failed to create automation query!" -Exception $_.Exception - } -} -Export-ModuleMember -Function Deploy-AutomationAzureQuery - - -# Create automation schedule if it does not exist -# ----------------------------------------------- -function Deploy-AutomationScheduleInDays { - param( - [Parameter(Mandatory = $true, HelpMessage = "Automation account to deploy the schedule into")] - [Microsoft.Azure.Commands.Automation.Model.AutomationAccount]$Account, - [Parameter(Mandatory = $false, HelpMessage = "Interval in days")] - [int]$DayInterval = 1, - [Parameter(Mandatory = $true, HelpMessage = "Name of automation schedule to deploy")] - [string]$Name, - [Parameter(Mandatory = $false, HelpMessage = "Day of the week to start on")] - [ValidateSet("Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday")] - [string]$StartDayOfWeek = $null, - [Parameter(Mandatory = $true, HelpMessage = "Start time")] - [string]$Time, - [Parameter(Mandatory = $false, HelpMessage = "Time zone")] - [System.TimeZoneInfo]$TimeZone = "UTC" - ) - try { - Add-LogMessage -Level Info "Ensuring that automation schedule '$Name' exists..." - # Remove any existing automation schedule with the same name, noting that a UUID might have been appended to the name - $schedule = Get-AzAutomationSchedule -ResourceGroupName $Account.ResourceGroupName -AutomationAccountName $Account.AutomationAccountName | Where-Object { $_.Name -like "${Name}*" } | Remove-AzAutomationSchedule -Force - # Create the new automation schedule - Add-LogMessage -Level Info "[ ] Creating automation schedule '$Name'" - # Set the appropriate time and day for this schedule to begin - $StartDateTime = (Get-Date $Time).AddDays(1) - if ($StartDayOfWeek) { - while ($StartDateTime.DayOfWeek -ne $StartDayOfWeek) { - $StartDateTime = $StartDateTime.AddDays(1) - } - } - $schedule = New-AzAutomationSchedule -AutomationAccountName $account.AutomationAccountName ` - -DayInterval $DayInterval ` - -ForUpdateConfiguration ` - -Name $Name ` - -ResourceGroupName $account.ResourceGroupName ` - -StartTime $StartDateTime ` - -TimeZone $TimeZone.Id ` - -ErrorAction Stop - Add-LogMessage -Level Success "Created automation schedule '$($schedule.Name)'. Next trigger will be at ${StartDateTime}." - return $schedule - } catch { - Add-LogMessage -Level Fatal "Failed to create automation schedule '$Name'!" 
-Exception $_.Exception - } -} -Export-ModuleMember -Function Deploy-AutomationScheduleInDays - - -# Create log analytics solution if it does not exist -# -------------------------------------------------- -function Deploy-LogAnalyticsSolution { - param( - [Parameter(Mandatory = $true, HelpMessage = "Solution type")] - [ValidateSet("Updates")] - [string]$SolutionType, - [Parameter(Mandatory = $true, HelpMessage = "Log analytics workspace")] - [Microsoft.Azure.Management.Internal.Network.Common.IOperationalInsightWorkspace]$Workspace - ) - $solution = Get-AzMonitorLogAnalyticsSolution -ResourceGroupName $Workspace.ResourceGroupName | Where-Object { $_.Name -eq "$SolutionType($($Workspace.Name))" } - if (-not $solution) { - # As New-AzMonitorLogAnalyticsSolution always fails, we attempt to create and then check for existence - $null = New-AzMonitorLogAnalyticsSolution -ResourceGroupName $Workspace.ResourceGroupName -Type "$SolutionType" -WorkspaceResourceId $Workspace.ResourceId -Location $Workspace.Location -ErrorAction SilentlyContinue - $solution = Get-AzMonitorLogAnalyticsSolution -ResourceGroupName $Workspace.ResourceGroupName | Where-Object { $_.Name -eq "$SolutionType($($Workspace.Name))" } - if ($solution) { - Add-LogMessage -Level Success "Deployed solution '$SolutionType' to $($Workspace.Name)" - } else { - Add-LogMessage -Level Fatal "Failed to deploy solution '$SolutionType' to $($Workspace.Name)!" - } - } else { - Add-LogMessage -Level Info "Solution '$SolutionType' has already been deployed to $($Workspace.Name)" - } - return $solution -} -Export-ModuleMember -Function Deploy-LogAnalyticsSolution - - -# Register VMs with automation schedule -# ------------------------------------- -function Register-VmsWithAutomationSchedule { - param( - [Parameter(Mandatory = $true, HelpMessage = "Automation account to use")] - [Microsoft.Azure.Commands.Automation.Model.AutomationAccount]$Account, - [Parameter(Mandatory = $false, HelpMessage = "How many hours to allow for updates")] - [int]$DurationHours = 2, - [Parameter(Mandatory = $false, HelpMessage = "Which categories of update to include")] - [string[]]$IncludedUpdateCategories, - [Parameter(Mandatory = $true, ParameterSetName = "ByQuery", HelpMessage = "Azure query to apply the schedule to")] - [Microsoft.Azure.Commands.Automation.Model.UpdateManagement.AzureQueryProperties]$Query, - [Parameter(Mandatory = $true, HelpMessage = "Schedule to apply to the VMs")] - [Microsoft.Azure.Commands.Automation.Model.Schedule]$Schedule, - [Parameter(Mandatory = $true, ParameterSetName = "ByVMId", HelpMessage = "IDs of VMs to apply the schedule to")] - [AllowNull()] - [string[]]$VmIds, - [Parameter(Mandatory = $true, HelpMessage = "Type of VMs")] - [ValidateSet("Linux", "Windows")] - [string]$VmType - ) - # Use either query or VM IDs - $params = @{ "Duration" = (New-TimeSpan -Hours $DurationHours) } - if ($Query) { - $params["AzureQuery"] = $Query - } else { - if ((-not $VmIds) -or ($VmIds.Count -eq 0)) { - Add-LogMessage -Level Warning "Skipping application of automation schedule '$($ScheduleName)' as no VMs were specified." - return $null - } - $params["AzureVMResourceId"] = $VmIds - } - Add-LogMessage -Level Info "Applying automation schedule '$($Schedule.Name)' for $VmType VM(s)..." 
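# A brief aside on the '@params' splatting used in the calls below: a hashtable keyed by
# parameter name can be expanded onto a command with the '@' sigil. A self-contained,
# illustrative example (the names here are not taken from this module):
$splat = @{ Path = "."; Filter = "*.psm1" }
Get-ChildItem @splat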
- try { - # Remove any existing update configuration with the same name - $null = Remove-AzAutomationSoftwareUpdateConfiguration -ResourceGroupName "$($Account.ResourceGroupName)" -AutomationAccountName "$($Account.AutomationAccountName)" -Name $Schedule.Name -ErrorAction SilentlyContinue - if ($VmType -eq "Windows") { - $IncludedPackageClassification = @("Critical", "Definition", "FeaturePack", "Security", "ServicePack", "Tools", "Unclassified", "UpdateRollup", "Updates") | Where-Object { $IncludedUpdateCategories.Contains($_) } - $config = New-AzAutomationSoftwareUpdateConfiguration -AutomationAccountName $Account.AutomationAccountName ` - -Confirm:$false ` - -ErrorAction Stop ` - -IncludedUpdateClassification $IncludedPackageClassification ` - -ResourceGroupName $Account.ResourceGroupName ` - -Schedule $Schedule ` - -Windows ` - @params - } else { - $IncludedPackageClassification = @("Critical", "Other", "Security", "Unclassified") | Where-Object { $IncludedUpdateCategories.Contains($_) } - $config = New-AzAutomationSoftwareUpdateConfiguration -AutomationAccountName $Account.AutomationAccountName ` - -Confirm:$false ` - -ErrorAction Stop ` - -IncludedPackageClassification $IncludedPackageClassification ` - -Linux ` - -ResourceGroupName $Account.ResourceGroupName ` - -Schedule $Schedule ` - @params - } - Add-LogMessage -Level Success "Applied automation schedule '$($Schedule.Name)' to $VmType VM(s)." - return $config - } catch { - Add-LogMessage -Level Fatal "Failed to apply automation schedule '$($Schedule.Name)' to $($VmIds.Count) VM(s)!" -Exception $_.Exception - } -} -Export-ModuleMember -Function Register-VmsWithAutomationSchedule diff --git a/deployment/common/AzureCompute.psm1 b/deployment/common/AzureCompute.psm1 deleted file mode 100644 index 2371484afb..0000000000 --- a/deployment/common/AzureCompute.psm1 +++ /dev/null @@ -1,807 +0,0 @@ -Import-Module Az.Compute -ErrorAction Stop -Import-Module Az.Resources -ErrorAction Stop -Import-Module Az.Storage -ErrorAction Stop -Import-Module $PSScriptRoot/AzureNetwork -ErrorAction Stop -Import-Module $PSScriptRoot/Logging -ErrorAction Stop - - -# Add a VM to a domain -# -------------------- -function Add-WindowsVMtoDomain { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of VM to domain join")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Resource group for VM to domain join")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Domain name to join")] - [string]$DomainName, - [Parameter(Mandatory = $true, HelpMessage = "Username for domain joining account")] - [string]$DomainJoinUsername, - [Parameter(Mandatory = $true, HelpMessage = "Password for domain joining account")] - [System.Security.SecureString]$DomainJoinPassword, - [Parameter(Mandatory = $true, HelpMessage = "The full distinguished name for the OU to add this VM to")] - [string]$OUPath, - [Parameter(Mandatory = $false, HelpMessage = "Force restart of VM if already running")] - [switch]$ForceRestart - ) - Add-LogMessage -Level Info "[ ] Attempting to join VM '$Name' to domain '$DomainName'" - $domainJoinCredentials = New-Object System.Management.Automation.PSCredential("${DomainName}\${DomainJoinUsername}", $DomainJoinPassword) - $null = Set-AzVMADDomainExtension -VMName $Name -ResourceGroupName $ResourceGroupName -DomainName $DomainName -Credential $domainJoinCredentials -Name "joindomain" -JoinOption 3 -TypeHandlerVersion 1.3 -OUPath $OUPath -Restart:$ForceRestart - if ($?) 
{ - Add-LogMessage -Level Success "Joined VM '$Name' to domain '$DomainName'" - } else { - Add-LogMessage -Level Fatal "Failed to join VM '$Name' to domain '$DomainName'!" - } -} -Export-ModuleMember -Function Add-WindowsVMtoDomain - - -# Confirm VM is deallocated -# ------------------------- -function Confirm-VmDeallocated { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of VM")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group that the VM belongs to")] - [string]$ResourceGroupName - ) - try { - $vmStatuses = (Get-AzVM -Name $Name -ResourceGroupName $ResourceGroupName -Status -ErrorAction Stop).Statuses.Code - if ($vmStatuses -contains "ProvisioningState/failed/VMStoppedToWarnSubscription") { - Add-LogMessage -Level Warning "VM '$Name' has status: VMStoppedToWarnSubscription meaning that it was automatically stopped when the subscription ran out of credit." - } - return (($vmStatuses -contains "PowerState/deallocated") -and ($vmStatuses -contains "ProvisioningState/succeeded")) - } catch { - return $false - } -} -Export-ModuleMember -Function Confirm-VmDeallocated - - -# Confirm VM is running -# --------------------- -function Confirm-VmRunning { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of VM")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group that the VM belongs to")] - [string]$ResourceGroupName - ) - try { - $vmStatuses = (Get-AzVM -Name $Name -ResourceGroupName $ResourceGroupName -Status -ErrorAction Stop).Statuses.Code - if ($vmStatuses -contains "ProvisioningState/failed/VMStoppedToWarnSubscription") { - Add-LogMessage -Level Warning "VM '$Name' has status: VMStoppedToWarnSubscription meaning that it was automatically stopped when the subscription ran out of credit." - } - return (($vmStatuses -contains "PowerState/running") -and ($vmStatuses -contains "ProvisioningState/succeeded")) - } catch { - return $false - } -} -Export-ModuleMember -Function Confirm-VmRunning - - -# Confirm VM is stopped -# --------------------- -function Confirm-VmStopped { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of VM")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group that the VM belongs to")] - [string]$ResourceGroupName - ) - try { - $vmStatuses = (Get-AzVM -Name $Name -ResourceGroupName $ResourceGroupName -Status -ErrorAction Stop).Statuses.Code - if ($vmStatuses -contains "ProvisioningState/failed/VMStoppedToWarnSubscription") { - Add-LogMessage -Level Warning "VM '$Name' has status: VMStoppedToWarnSubscription meaning that it was automatically stopped when the subscription ran out of credit." 
- } - return (($vmStatuses -contains "PowerState/stopped") -and (($vmStatuses -contains "ProvisioningState/succeeded") -or ($vmStatuses -contains "ProvisioningState/failed/VMStoppedToWarnSubscription"))) - } catch { - return $false - } -} -Export-ModuleMember -Function Confirm-VmStopped - - -# Create Linux virtual machine if it does not exist -# ------------------------------------------------- -function Deploy-LinuxVirtualMachine { - param( - [Parameter(Mandatory = $true, HelpMessage = "Administrator password")] - [System.Security.SecureString]$AdminPassword, - [Parameter(Mandatory = $true, HelpMessage = "Administrator username")] - [string]$AdminUsername, - [Parameter(Mandatory = $true, HelpMessage = "Name of storage account for boot diagnostics")] - [Microsoft.Azure.Commands.Management.Storage.Models.PSStorageAccount]$BootDiagnosticsAccount, - [Parameter(Mandatory = $true, HelpMessage = "Cloud-init YAML file")] - [string]$CloudInitYaml, - [Parameter(Mandatory = $true, ParameterSetName = "ByNicId_ByImageId", HelpMessage = "ID of VM image to deploy")] - [Parameter(Mandatory = $true, ParameterSetName = "ByIpAddress_ByImageId", HelpMessage = "ID of VM image to deploy")] - [string]$ImageId, - [Parameter(Mandatory = $true, ParameterSetName = "ByNicId_ByImageSku", HelpMessage = "SKU of VM image to deploy")] - [Parameter(Mandatory = $true, ParameterSetName = "ByIpAddress_ByImageSku", HelpMessage = "SKU of VM image to deploy")] - [string]$ImageSku, - [Parameter(Mandatory = $true, HelpMessage = "Location of resource group to deploy")] - [string]$Location, - [Parameter(Mandatory = $true, HelpMessage = "Name of virtual machine to deploy")] - [string]$Name, - [Parameter(Mandatory = $true, ParameterSetName = "ByNicId_ByImageId", HelpMessage = "ID of network card to attach to this VM")] - [Parameter(Mandatory = $true, ParameterSetName = "ByNicId_ByImageSku", HelpMessage = "ID of network card to attach to this VM")] - [string]$NicId, - [Parameter(Mandatory = $true, HelpMessage = "OS disk type (eg. Standard_GRS)")] - [string]$OsDiskType, - [Parameter(Mandatory = $true, ParameterSetName = "ByIpAddress_ByImageId", HelpMessage = "Private IP address to assign to this VM")] - [Parameter(Mandatory = $true, ParameterSetName = "ByIpAddress_ByImageSku", HelpMessage = "Private IP address to assign to this VM")] - [string]$PrivateIpAddress, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to deploy into")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Size of virtual machine to deploy")] - [string]$Size, - [Parameter(Mandatory = $true, ParameterSetName = "ByIpAddress_ByImageId", HelpMessage = "Subnet to deploy this VM into")] - [Parameter(Mandatory = $true, ParameterSetName = "ByIpAddress_ByImageSku", HelpMessage = "Subnet to deploy this VM into")] - [Microsoft.Azure.Commands.Network.Models.PSSubnet]$Subnet, - [Parameter(Mandatory = $false, HelpMessage = "Administrator public SSH key")] - [string]$AdminPublicSshKey = $null, - [Parameter(Mandatory = $false, HelpMessage = "IDs of data disks")] - [string[]]$DataDiskIds = $null, - [Parameter(Mandatory = $false, HelpMessage = "Do not wait for deployment to finish")] - [switch]$NoWait = $false, - [Parameter(Mandatory = $false, HelpMessage = "Size of OS disk (GB)")] - [int]$OsDiskSizeGb = $null - ) - Add-LogMessage -Level Info "Ensuring that virtual machine '$Name' exists..." 
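# --- Illustrative sketch (annotation, not part of the deleted module) -----------------
# The Confirm-Vm* helpers above each return a boolean (and $false on any error), so they
# can gate start/stop logic directly. The VM and resource group names are hypothetical.
if (Confirm-VmDeallocated -Name "SRE-SRD-001" -ResourceGroupName "RG_SRE_COMPUTE") {
    Add-LogMessage -Level Info "VM 'SRE-SRD-001' is deallocated; safe to resize or detach disks."
}
# ---------------------------------------------------------------------------------------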
- $null = Get-AzVM -Name $Name -ResourceGroupName $ResourceGroupName -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($notExists) { - $adminCredentials = New-Object System.Management.Automation.PSCredential("$AdminUsername", $AdminPassword) - # Build VM configuration - $vmConfig = New-AzVMConfig -VMName $Name -VMSize $Size - # Set source image to a custom image or to latest Ubuntu (default) - if ($ImageId) { - $vmConfig = Set-AzVMSourceImage -VM $vmConfig -Id $ImageId - } elseif ($ImageSku) { - if ($ImageSku -eq "Ubuntu-22.04") { - # Note that we cannot move 'Ubuntu-latest' to 22.04 until migrating to Azure Monitor Agent https://docs.microsoft.com/en-us/azure/azure-monitor/agents/azure-monitor-agent-migration - Add-LogMessage -Level Warning "Note that Ubuntu 22.04 is not supported by the Azure Log Analytics Agent used to manage automatic updates. Please consider using Ubuntu 20.04." - $vmConfig = Set-AzVMSourceImage -VM $vmConfig -PublisherName Canonical -Offer 0001-com-ubuntu-server-jammy -Skus "22_04-LTS" -Version "latest" - } elseif (($ImageSku -eq "Ubuntu-20.04") -or ($ImageSku -eq "Ubuntu-latest")) { - $vmConfig = Set-AzVMSourceImage -VM $vmConfig -PublisherName Canonical -Offer 0001-com-ubuntu-server-focal -Skus "20_04-LTS" -Version "latest" - } elseif ($ImageSku -eq "Ubuntu-18.04") { - $vmConfig = Set-AzVMSourceImage -VM $vmConfig -PublisherName Canonical -Offer UbuntuServer -Skus "18.04-LTS" -Version "latest" - } - } - if (-not $vmConfig) { - Add-LogMessage -Level Fatal "Could not determine which source image to use!" - } - $vmConfig = Set-AzVMOperatingSystem -VM $vmConfig -Linux -ComputerName $Name -Credential $adminCredentials -CustomData $CloudInitYaml - if (-not $NicId) { - $NicId = (Deploy-NetworkInterface -Name "${Name}-NIC" -ResourceGroupName $ResourceGroupName -Subnet $Subnet -PrivateIpAddress $PrivateIpAddress -Location $Location).Id - } - $vmConfig = Add-AzVMNetworkInterface -VM $vmConfig -Id $NicId -Primary - if ($OsDiskSizeGb) { - $vmConfig = Set-AzVMOSDisk -VM $vmConfig -StorageAccountType $OsDiskType -Name "$Name-OS-DISK" -CreateOption FromImage -DiskSizeInGB $OsDiskSizeGb - } else { - $vmConfig = Set-AzVMOSDisk -VM $vmConfig -StorageAccountType $OsDiskType -Name "$Name-OS-DISK" -CreateOption FromImage - } - $vmConfig = Set-AzVMBootDiagnostic -VM $vmConfig -Enable -ResourceGroupName $BootDiagnosticsAccount.ResourceGroupName -StorageAccountName $BootDiagnosticsAccount.StorageAccountName - # Add optional data disks - $lun = 0 - foreach ($diskId in $DataDiskIds) { - $vmConfig = Add-AzVMDataDisk -VM $vmConfig -ManagedDiskId $diskId -CreateOption Attach -Lun $lun - $lun += 1 - } - # Copy public key to VM - if ($AdminPublicSshKey) { - $vmConfig = Add-AzVMSshPublicKey -VM $vmConfig -KeyData $AdminPublicSshKey -Path "/home/$($AdminUsername)/.ssh/authorized_keys" - } - # Create VM - Add-LogMessage -Level Info "[ ] Creating virtual machine '$Name'" - try { - $null = New-AzVM -ResourceGroupName $ResourceGroupName -Location $Location -VM $vmConfig -ErrorAction Stop - Add-LogMessage -Level Success "Created virtual machine '$Name'" - } catch { - Add-LogMessage -Level Fatal "Failed to create virtual machine '$Name'! Check that your desired image is available in this region." 
-Exception $_.Exception - } - if (-not $NoWait) { - Start-Sleep 30 # wait for VM deployment to register - Wait-ForCloudInit -Name $Name -ResourceGroupName $ResourceGroupName - } - } else { - Add-LogMessage -Level InfoSuccess "Virtual machine '$Name' already exists" - } - return (Get-AzVM -Name $Name -ResourceGroupName $ResourceGroupName) -} -Export-ModuleMember -Function Deploy-LinuxVirtualMachine - - -# Create a managed disk if it does not exist -# ------------------------------------------ -function Deploy-ManagedDisk { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of disk to deploy")] - $Name, - [Parameter(Mandatory = $true, HelpMessage = "Disk size in GB")] - $SizeGB, - [Parameter(Mandatory = $true, HelpMessage = "Disk type (eg. Standard_GRS)")] - $Type, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to deploy into")] - $ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Location of resource group to deploy")] - $Location - ) - Add-LogMessage -Level Info "Ensuring that managed disk '$Name' exists..." - $disk = Get-AzDisk -Name $Name -ResourceGroupName $ResourceGroupName -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($notExists) { - Add-LogMessage -Level Info "[ ] Creating $SizeGB GB managed disk '$Name'" - $diskConfig = New-AzDiskConfig -Location $Location -DiskSizeGB $SizeGB -AccountType $Type -OsType Linux -CreateOption Empty - $disk = New-AzDisk -ResourceGroupName $ResourceGroupName -DiskName $Name -Disk $diskConfig - if ($?) { - Add-LogMessage -Level Success "Created managed disk '$Name'" - } else { - Add-LogMessage -Level Fatal "Failed to create managed disk '$Name'!" - } - } else { - Add-LogMessage -Level InfoSuccess "Managed disk '$Name' already exists" - } - return $disk -} -Export-ModuleMember -Function Deploy-ManagedDisk - - -# Deploy Azure Monitoring Extension on a VM -# ----------------------------------------- -function Deploy-VirtualMachineMonitoringExtension { - param( - [Parameter(Mandatory = $true, HelpMessage = "VM object")] - [Microsoft.Azure.Commands.Compute.Models.PSVirtualMachine]$VM, - [Parameter(Mandatory = $true, HelpMessage = "Log Analytics Workspace ID")] - [string]$WorkspaceId, - [Parameter(Mandatory = $true, HelpMessage = "Log Analytics Workspace key")] - [string]$WorkspaceKey - ) - if ($VM.OSProfile.WindowsConfiguration) { - # Install Monitoring Agent - Replacement is Azure Monitor Agent (https://docs.microsoft.com/en-us/azure/azure-monitor/agents/agents-overview?tabs=PowerShellWindows) - Set-VirtualMachineExtensionIfNotInstalled -VM $VM -Publisher "Microsoft.EnterpriseCloud.Monitoring" -Type "MicrosoftMonitoringAgent" -Version 1.0 -WorkspaceId $WorkspaceId -WorkspaceKey $WorkspaceKey - # # Install Dependency Agent - # Set-VirtualMachineExtensionIfNotInstalled -VM $VM -Publisher "Microsoft.Azure.Monitoring.DependencyAgent" -Type "DependencyAgentWindows" -Version 9.10 -WorkspaceId $WorkspaceId -WorkspaceKey $WorkspaceKey - } elseif ($VM.OSProfile.LinuxConfiguration) { - # Install Monitoring Agent - does not support Ubuntu 22.04. 
Replacement is Azure Monitor Agent (https://docs.microsoft.com/en-us/azure/azure-monitor/agents/agents-overview?tabs=PowerShellWindows) - Set-VirtualMachineExtensionIfNotInstalled -VM $VM -Publisher "Microsoft.EnterpriseCloud.Monitoring" -Type "OmsAgentForLinux" -EnableAutomaticUpgrade $true -Version 1.14 -WorkspaceId $WorkspaceId -WorkspaceKey $WorkspaceKey - # # Install Dependency Agent - not working with current Ubuntu 20.04 (https://docs.microsoft.com/en-us/answers/questions/938560/unable-to-enable-insights-on-ubuntu-2004-server.html) - # Set-VirtualMachineExtensionIfNotInstalled -VM $VM -Publisher "Microsoft.Azure.Monitoring.DependencyAgent" -Type "DependencyAgentLinux" -Version 9.10 -WorkspaceId $WorkspaceId -WorkspaceKey $WorkspaceKey - } else { - Add-LogMessage -Level Fatal "VM OSProfile not recognised. Cannot activate logging for VM '$($vm.Name)'!" - } -} -Export-ModuleMember -Function Deploy-VirtualMachineMonitoringExtension - - -# Get image ID -# ------------ -function Get-ImageFromGallery { - param( - [Parameter(Mandatory = $true, HelpMessage = "Image version to retrieve")] - [string]$ImageVersion, - [Parameter(Mandatory = $true, ParameterSetName = "ByImageDefinition", HelpMessage = "Image definition that image belongs to")] - [string]$ImageDefinition, - [Parameter(Mandatory = $true, ParameterSetName = "ByImageSku", HelpMessage = "Image SKU that image belongs to")] - [string]$ImageSku, - [Parameter(Mandatory = $true, HelpMessage = "Image gallery name")] - [string]$GalleryName, - [Parameter(Mandatory = $true, HelpMessage = "Resource group containing image gallery")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Subscription containing image gallery")] - [string]$Subscription - ) - $originalContext = Get-AzContext - try { - $null = Set-AzContext -Subscription $Subscription -ErrorAction Stop - if (-not $ImageDefinition) { - try { - Add-LogMessage -Level Info "[ ] Looking for image definition corresponding to '$ImageSku'..." - $ImageDefinition = (Get-AzGalleryImageDefinition -ResourceGroupName $ResourceGroupName -GalleryName $GalleryName | Where-Object { $_.Identifier.Sku -eq $ImageSku })[0].Name - Add-LogMessage -Level Success "Interpreted $ImageSku as image definition $imageDefinition" - } catch { - Add-LogMessage -Level Fatal "Failed to interpret $ImageSku as an image type!" -Exception $_.Exception - } - } - Add-LogMessage -Level Info "Looking for image $imageDefinition version $imageVersion..." - try { - $image = Get-AzGalleryImageVersion -ResourceGroup $ResourceGroupName -GalleryName $GalleryName -GalleryImageDefinitionName $ImageDefinition -GalleryImageVersionName $ImageVersion -ErrorAction Stop - } catch [Microsoft.Azure.Commands.Compute.Common.ComputeCloudException] { - $versions = Get-AzGalleryImageVersion -ResourceGroup $ResourceGroupName -GalleryName $GalleryName -GalleryImageDefinitionName $ImageDefinition | Sort-Object Name | ForEach-Object { $_.Name } - Add-LogMessage -Level Error "Image version '$ImageVersion' is invalid. 
Available versions are: $versions" - $ImageVersion = $versions | Select-Object -Last 1 - $userVersion = Read-Host -Prompt "Enter the version you would like to use (or leave empty to accept the default: '$ImageVersion')" - if ($versions.Contains($userVersion)) { - $ImageVersion = $userVersion - } - $image = Get-AzGalleryImageVersion -ResourceGroup $ResourceGroupName -GalleryName $GalleryName -GalleryImageDefinitionName $ImageDefinition -GalleryImageVersionName $ImageVersion -ErrorAction Stop - } - if ($image) { - $commitHash = $image.Tags["Build commit hash"] - if ($commitHash) { - Add-LogMessage -Level Success "Found image $imageDefinition version $($image.Name) in gallery created from commit $commitHash" - } else { - Add-LogMessage -Level Success "Found image $imageDefinition version $($image.Name) in gallery" - } - } else { - Add-LogMessage -Level Fatal "Could not find image $imageDefinition version $ImageVersion in gallery!" - } - } catch { - $null = Set-AzContext -Context $originalContext -ErrorAction Stop - throw - } finally { - $null = Set-AzContext -Context $originalContext -ErrorAction Stop - } - return $image -} -Export-ModuleMember -Function Get-ImageFromGallery - - -# Get all VMs for an SHM or SRE -# ----------------------------- -function Get-VMsByResourceGroupPrefix { - param( - [Parameter(Mandatory = $true, HelpMessage = "Prefix to match resource groups on")] - [string]$ResourceGroupPrefix - ) - $matchingResourceGroups = Get-AzResourceGroup | Where-Object { $_.ResourceGroupName -like "${ResourceGroupPrefix}_*" } - $matchingVMs = [ordered]@{} - foreach ($rg in $matchingResourceGroups) { - $rgVms = Get-AzVM -ResourceGroup $rg.ResourceGroupName - if ($rgVms) { - $matchingVMs[$rg.ResourceGroupName] = $rgVms - } - } - return $matchingVMs -} -Export-ModuleMember -Function Get-VMsByResourceGroupPrefix - - -# Get all VMs for an SHM or SRE -# ----------------------------- -function Get-VMState { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of VM to get the state of")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group VM belongs to")] - [string]$ResourceGroupName - ) - try { - return (Get-AzVM -ResourceGroupName $ResourceGroupName -Name $Name -Status -ErrorAction Stop).PowerState - } catch { - return "VM does not exist" - } -} -Export-ModuleMember -Function Get-VMState - - -# Run remote shell script -# ----------------------- -function Invoke-RemoteScript { - param( - [Parameter(Mandatory = $true, ParameterSetName = "ByPath", HelpMessage = "Path to local script that will be run remotely")] - [string]$ScriptPath, - [Parameter(Mandatory = $true, ParameterSetName = "ByString", HelpMessage = "Contents of script that will be run remotely")] - [string]$Script, - [Parameter(Mandatory = $true, HelpMessage = "Name of VM to run on")] - [string]$VMName, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group VM belongs to")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $false, HelpMessage = "Type of script to run")] - [ValidateSet("PowerShell", "UnixShell")] - [string]$Shell = "PowerShell", - [Parameter(Mandatory = $false, HelpMessage = "Suppress script output on success")] - [switch]$SuppressOutput, - [Parameter(Mandatory = $false, HelpMessage = "(Optional) hashtable of script parameters")] - [System.Collections.IDictionary]$Parameter = $null - ) - # If we're given a script then create a file from it - $tmpScriptFile = $null - if ($Script) { - $tmpScriptFile = New-TemporaryFile - $Script | Out-File -FilePath 
$tmpScriptFile.FullName - $ScriptPath = $tmpScriptFile.FullName - } - # Validate any external parameters as non-string arguments or arguments containing special characters will cause Invoke-AzVMRunCommand to fail - $params = @{} - if ($Parameter) { $params["Parameter"] = $Parameter } - $params["CommandId"] = ($Shell -eq "PowerShell") ? "RunPowerShellScript" : "RunShellScript" - if ($params.Contains("Parameter")) { - foreach ($kv in $params["Parameter"].GetEnumerator()) { - if ($kv.Value -isnot [string]) { - Add-LogMessage -Level Fatal "$($kv.Key) argument ($($kv.Value)) must be a string!" - } - foreach ($unsafeCharacter in @("|", "&")) { - if ($kv.Value.Contains($unsafeCharacter)) { - Add-LogMessage -Level Fatal "$($kv.Key) argument ($($kv.Value)) contains '$unsafeCharacter' which will cause Invoke-AzVMRunCommand to fail. Consider encoding this variable in Base-64." - } - } - foreach ($whitespaceCharacter in @(" ", "`t")) { - if (($Shell -eq "UnixShell") -and ($kv.Value.Contains($whitespaceCharacter))) { - if (-not (($kv.Value[0] -eq "'") -or ($kv.Value[0] -eq '"'))) { - Write-Information -InformationAction "Continue" $kv.Value[0] - Add-LogMessage -Level Fatal "$($kv.Key) argument ($($kv.Value)) contains '$whitespaceCharacter' which will cause the shell script to fail. Consider wrapping this variable in single quotes." - } - } - } - } - } - try { - # Catch failures from running two commands in close proximity and rerun - while ($true) { - try { - $result = Invoke-AzVMRunCommand -Name $VMName -ResourceGroupName $ResourceGroupName -ScriptPath $ScriptPath @params -ErrorAction Stop - $success = $? - break - } catch [Microsoft.Azure.Commands.Compute.Common.ComputeCloudException] { - if (-not ($_.Exception.Message -match "Run command extension execution is in progress")) { throw } - } - } - } catch { - Add-LogMessage -Level Fatal "Running '$ScriptPath' on remote VM '$VMName' failed." -Exception $_.Exception - } - $success = $success -and ($result.Status -eq "Succeeded") - foreach ($outputStream in $result.Value) { - # Check for 'ComponentStatus//succeeded' as a signal of success - $success = $success -and (($outputStream.Code -split "/")[-1] -eq "succeeded") - # Check for ' [x] ' in the output stream as a signal of failure - if ($outputStream.Message -ne "") { - $success = $success -and ([string]($outputStream.Message) -NotLike '* `[x`] *') - } - } - # Clean up any temporary scripts - if ($tmpScriptFile) { Remove-Item $tmpScriptFile.FullName } - # Check for success or failure - if ($success) { - Add-LogMessage -Level Success "Remote script execution succeeded" - if (-not $SuppressOutput) { Write-Information -InformationAction "Continue" ($result.Value | Out-String) } - } else { - Add-LogMessage -Level Info "Script output:" - Write-Information -InformationAction "Continue" ($result | Out-String) - Add-LogMessage -Level Fatal "Remote script execution has failed. Please check the output above before re-running this script." 
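# --- Illustrative usage sketch (annotation, not part of the deleted module) -----------
# Running an inline shell command on a Linux VM with the Invoke-RemoteScript helper
# above. Any values passed via -Parameter must be strings and must not contain '|' or
# '&', as checked in the validation block. VM and resource group names are hypothetical.
$result = Invoke-RemoteScript -Shell "UnixShell" `
                              -Script "echo 'remote check'; uptime" `
                              -VMName "SRE-SRD-001" `
                              -ResourceGroupName "RG_SRE_COMPUTE" `
                              -SuppressOutput
# ---------------------------------------------------------------------------------------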
- } - return $result -} -Export-ModuleMember -Function Invoke-RemoteScript - - -# Set Azure Monitoring Extension on a VM -# -------------------------------------- -function Set-VirtualMachineExtensionIfNotInstalled { - param( - [Parameter(Mandatory = $false, HelpMessage = "Extension publisher")] - [boolean]$EnableAutomaticUpgrade = $false, - [Parameter(Mandatory = $true, HelpMessage = "Extension publisher")] - [string]$Publisher, - [Parameter(Mandatory = $true, HelpMessage = "Extension type")] - [string]$Type, - [Parameter(Mandatory = $true, HelpMessage = "Extension version")] - [string]$Version, - [Parameter(Mandatory = $true, HelpMessage = "VM object")] - [Microsoft.Azure.Commands.Compute.Models.PSVirtualMachine]$VM, - [Parameter(Mandatory = $true, HelpMessage = "Log Analytics Workspace ID")] - [string]$WorkspaceId, - [Parameter(Mandatory = $true, HelpMessage = "Log Analytics Workspace key")] - [string]$WorkspaceKey - ) - Add-LogMessage -Level Info "[ ] Ensuring extension '$type' is installed on VM '$($VM.Name)'." - $extension = Get-AzVMExtension -ResourceGroupName $VM.ResourceGroupName -VMName $VM.Name -ErrorAction SilentlyContinue | Where-Object { $_.Publisher -eq $Publisher -and $_.ExtensionType -eq $Type } - if ($extension -and $extension.ProvisioningState -ne "Succeeded") { - Add-LogMessage -Level Warning "Removing misconfigured extension '$type' installation on VM '$($VM.Name)'." - $null = Remove-AzVMExtension -ResourceGroupName $VM.ResourceGroupName -VMName $VM.Name -Name $Type -Force - $extension = Get-AzVMExtension -ResourceGroupName $VM.ResourceGroupName -VMName $VM.Name -Name $Type -ErrorAction SilentlyContinue - } - if ($extension) { - Add-LogMessage -Level InfoSuccess "Extension '$type' is already installed on VM '$($VM.Name)'." - } else { - foreach ($i in 1..5) { - try { - $null = Set-AzVMExtension -EnableAutomaticUpgrade $EnableAutomaticUpgrade ` - -ExtensionName $type ` - -ExtensionType $type ` - -Location $VM.location ` - -ProtectedSettings @{ "workspaceKey" = $WorkspaceKey } ` - -Publisher $publisher ` - -ResourceGroupName $VM.ResourceGroupName ` - -Settings @{ "workspaceId" = $WorkspaceId } ` - -TypeHandlerVersion $version ` - -VMName $VM.Name ` - -ErrorAction Stop - Start-Sleep 10 - $extension = Get-AzVMExtension -ResourceGroupName $VM.ResourceGroupName -VMName $VM.Name -Name $Type -ErrorAction Stop - if ($extension -and $extension.ProvisioningState -eq "Succeeded") { - break - } - } catch { - $exception = $_.Exception - Start-Sleep 30 - } - } - $extension = Get-AzVMExtension -ResourceGroupName $VM.ResourceGroupName -VMName $VM.Name -Name $Type -ErrorAction Stop - if ($extension -and $extension.ProvisioningState -eq "Succeeded") { - Add-LogMessage -Level Success "Installed extension '$type' on VM '$($VM.Name)'." - } else { - if ($exception) { - Add-LogMessage -Level Fatal "Failed to install extension '$type' on VM '$($VM.Name)'!" -Exception $exception - } else { - Add-LogMessage -Level Fatal "Failed to install extension '$type' on VM '$($VM.Name)'!" 
- } - } - } -} -Export-ModuleMember -Function Set-VirtualMachineExtensionIfNotInstalled - - -# Run Azure desired state configuration -# ------------------------------------- -function Invoke-AzureVmDesiredState { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of configuration file previously uploaded with Publish-AzVMDscConfiguration.")] - [string]$ArchiveBlobName, - [Parameter(Mandatory = $true, HelpMessage = "Name of Azure storage container where the configuration archive is located.")] - [string]$ArchiveContainerName, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group that contains the storage account containing the configuration archive.")] - [string]$ArchiveResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Name of Azure storage account containing the configuration archive.")] - [string]$ArchiveStorageAccountName, - [Parameter(Mandatory = $true, HelpMessage = "Name of the configuration function being invoked")] - [string]$ConfigurationName, - [Parameter(Mandatory = $false, HelpMessage = "Hash table that contains the arguments to the configuration function")] - [System.Collections.Hashtable]$ConfigurationParameters, - [Parameter(Mandatory = $true, HelpMessage = "Location of the VM being configured")] - [string]$VmLocation, - [Parameter(Mandatory = $true, HelpMessage = "Name of the VM being configured")] - [string]$VmName, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group that contains the VM being configured")] - [string]$VmResourceGroupName - ) - # Run remote configuration - Add-LogMessage -Level Info "Running desired state configuration '$ConfigurationName' on VM '$VmName'." - $params = @{} - if ($ConfigurationParameters) { $params["ConfigurationArgument"] = $ConfigurationParameters } - $maxTries = 3 - for ($attempt = 1; $attempt -le $maxTries; $attempt++) { - try { - $result = Set-AzVMDscExtension -ArchiveBlobName $ArchiveBlobName ` - -ArchiveContainerName $ArchiveContainerName ` - -ArchiveResourceGroupName $ArchiveResourceGroupName ` - -ArchiveStorageAccountName $ArchiveStorageAccountName ` - -ConfigurationName $ConfigurationName ` - -Location $VmLocation ` - -Name "DataSafeHavenDesiredState" ` - -ResourceGroupName $VmResourceGroupName ` - -Version "2.77" ` - -VMName $VmName ` - @params - break - } catch { - Add-LogMessage -Level Info "Applying desired state configuration failed. Attempt [$attempt/$maxTries]." - $ErrorMessage = $_.Exception - } - } - # Check for success or failure - if ($result.IsSuccessStatusCode) { - Add-LogMessage -Level Success "Ran desired state configuration '$ConfigurationName' on VM '$VmName'." 
- } else { - Add-LogMessage -Level Fatal "Failed to run desired state configuration '$ConfigurationName' on VM '$VmName'!`n${ErrorMessage}" - } - return $result -} -Export-ModuleMember -Function Invoke-AzureVmDesiredState - - -# Remove Virtual Machine disk -# --------------------------- -function Remove-ManagedDisk { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of the disk to remove")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group containing the disk")] - [string]$ResourceGroupName - ) - - $null = Get-AzDisk -Name $Name -ResourceGroupName $ResourceGroupName -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($notExists) { - Add-LogMessage -Level InfoSuccess "Disk '$Name' does not exist" - } else { - Add-LogMessage -Level Info "[ ] Removing disk '$Name'" - $null = Remove-AzDisk -Name $Name -ResourceGroupName $ResourceGroupName -Force - if ($?) { - Add-LogMessage -Level Success "Removed disk '$Name'" - } else { - Add-LogMessage -Level Failure "Failed to remove disk '$Name'" - } - } -} -Export-ModuleMember -Function Remove-ManagedDisk - - -# Remove Virtual Machine -# ---------------------- -function Remove-VirtualMachine { - [CmdletBinding()] - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of the VM to remove")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group containing the VM")] - [string]$ResourceGroupName, - [Parameter(HelpMessage = "Forces the command to run without asking for user confirmation.")] - [switch]$Force - ) - $vm = Get-AzVM -Name $Name -ResourceGroupName $ResourceGroupName -ErrorAction SilentlyContinue - if ($vm) { - # Get boot diagnostics details - $storageAccountName = [regex]::match($vm.DiagnosticsProfile.bootDiagnostics.storageUri, '^http[s]?://(.+?)\.').Groups[1].Value - $bootDiagnosticsContainerName = "bootdiagnostics-*-$($vm.VmId)" - # Remove VM - Add-LogMessage -Level Info "[ ] Removing VM '$($vm.Name)'" - $params = @{} - if ($Force) { $params["Force"] = $Force } - if ($ErrorAction) { $params["ErrorAction"] = $ErrorAction } - $null = $vm | Remove-AzVM @params - $success = $? - # Remove boot diagnostics container - Add-LogMessage -Level Info "[ ] Removing boot diagnostics account for '$($vm.Name)'" - $storageAccount = Get-AzStorageAccount | Where-Object { $_.StorageAccountName -eq $storageAccountName } - $null = $storageAccount | Get-AzStorageContainer | Where-Object { $_.Name -like $bootDiagnosticsContainerName } | Remove-AzStorageContainer -Force - $success = $success -and $? 
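# --- Illustrative sketch (annotation, not part of the deleted module) -----------------
# How the boot-diagnostics storage account name is recovered from the VM's storage URI
# by the regex used in Remove-VirtualMachine above. The URI is a hypothetical example.
$uri = "https://shmbootdiagnostics.blob.core.windows.net/"
[regex]::match($uri, '^http[s]?://(.+?)\.').Groups[1].Value   # returns 'shmbootdiagnostics'
# ---------------------------------------------------------------------------------------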
- if ($success) { - Add-LogMessage -Level Success "Removed VM '$Name'" - } else { - Add-LogMessage -Level Failure "Failed to remove VM '$Name'" - } - } else { - Add-LogMessage -Level InfoSuccess "VM '$Name' does not exist" - } -} -Export-ModuleMember -Function Remove-VirtualMachine - - -# Ensure VM is started, with option to force a restart -# ---------------------------------------------------- -function Start-VM { - param( - [Parameter(Mandatory = $true, HelpMessage = "Azure VM object", ParameterSetName = "ByObject", ValueFromPipeline = $true)] - [Microsoft.Azure.Commands.Compute.Models.PSVirtualMachine]$VM, - [Parameter(Mandatory = $true, HelpMessage = "Azure VM name", ParameterSetName = "ByName")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Azure VM resource group", ParameterSetName = "ByName")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $false, HelpMessage = "Skip this VM if it does not exist")] - [switch]$SkipIfNotExist, - [Parameter(Mandatory = $false, HelpMessage = "Force restart of VM if already running")] - [switch]$ForceRestart, - [Parameter(Mandatory = $false, HelpMessage = "Don't wait for VM (re)start operation to complete before returning")] - [switch]$NoWait - ) - # Get VM if not provided - if (-not $VM) { - try { - $VM = Get-AzVM -Name $Name -ResourceGroup $ResourceGroupName -ErrorAction Stop - } catch [Microsoft.Azure.Commands.Compute.Common.ComputeCloudException] { - if ($SkipIfNotExist) { return } - Add-LogMessage -Level Fatal "VM '$Name' could not be found in resource group '$ResourceGroupName'" -Exception $_.Exception - } - } - # Ensure VM is started but don't restart if already running - $operation = "start" - if (Confirm-VmRunning -Name $VM.Name -ResourceGroupName $VM.ResourceGroupName) { - if ($ForceRestart) { - $operation = "restart" - Add-LogMessage -Level Info "[ ] Restarting VM '$($VM.Name)'" - $result = Restart-AzVM -Name $VM.Name -ResourceGroupName $VM.ResourceGroupName -NoWait:$NoWait - } else { - Add-LogMessage -Level InfoSuccess "VM '$($VM.Name)' already running." - return - } - } elseif ((Confirm-VmDeallocated -Name $VM.Name -ResourceGroupName $VM.ResourceGroupName) -or (Confirm-VmStopped -Name $VM.Name -ResourceGroupName $VM.ResourceGroupName)) { - Add-LogMessage -Level Info "[ ] Starting VM '$($VM.Name)'" - $result = Start-AzVM -Name $VM.Name -ResourceGroupName $VM.ResourceGroupName -NoWait:$NoWait - } else { - $vmStatus = (Get-AzVM -Name $VM.Name -ResourceGroupName $VM.ResourceGroupName -Status).Statuses.Code - Add-LogMessage -Level Warning "VM '$($VM.Name)' not in supported status: $vmStatus. No action taken." - return - } - if ($result -is [Microsoft.Azure.Commands.Compute.Models.PSComputeLongRunningOperation]) { - # Synchronous operation requested - if ($result.Status -eq "Succeeded") { - Add-LogMessage -Level Success "VM '$($VM.Name)' successfully ${operation}ed." - } else { - # If (re)start failed, log error with failure reason - Add-LogMessage -Level Fatal "Failed to ${operation} VM '$($VM.Name)' [$($result.StatusCode): $($result.ReasonPhrase)]" - } - } elseif ($result -is [Microsoft.Azure.Commands.Compute.Models.PSAzureOperationResponse]) { - # Asynchronous operation requested - if (-not $result.IsSuccessStatusCode) { - Add-LogMessage -Level Fatal "Request to ${operation} VM '$($VM.Name)' failed [$($result.StatusCode): $($result.ReasonPhrase)]" - } else { - Add-LogMessage -Level Success "Request to ${operation} VM '$($VM.Name)' accepted." 
- } - } else { - Add-LogMessage -Level Fatal "Unrecognised return type from operation: '$($result.GetType().Name)'." - } -} -Export-ModuleMember -Function Start-VM - - -# Ensure VM is stopped (de-allocated) -# ----------------------------------- -function Stop-VM { - param( - [Parameter(Mandatory = $true, HelpMessage = "Azure VM object", ParameterSetName = "ByObject", ValueFromPipeline = $true)] - [Microsoft.Azure.Commands.Compute.Models.PSVirtualMachine]$VM, - [Parameter(Mandatory = $true, HelpMessage = "Azure VM name", ParameterSetName = "ByName")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Azure VM resource group", ParameterSetName = "ByName")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $false, HelpMessage = "Skip this VM if it does not exist")] - [switch]$SkipIfNotExist, - [Parameter(Mandatory = $false, HelpMessage = "Don't wait for VM deallocation operation to complete before returning")] - [switch]$NoWait - ) - # Get VM if not provided - if (-not $VM) { - try { - $VM = Get-AzVM -Name $Name -ResourceGroup $ResourceGroupName -ErrorAction Stop - } catch [Microsoft.Azure.Commands.Compute.Common.ComputeCloudException] { - if ($SkipIfNotExist) { return } - Add-LogMessage -Level Fatal "VM '$Name' could not be found in resource group '$ResourceGroupName'" -Exception $_.Exception - } - } - # Ensure VM is deallocated - if (Confirm-VmDeallocated -Name $VM.Name -ResourceGroupName $VM.ResourceGroupName) { - Add-LogMessage -Level InfoSuccess "VM '$($VM.Name)' already stopped." - return - } else { - Add-LogMessage -Level Info "[ ] Stopping VM '$($VM.Name)'" - $result = Stop-AzVM -Name $VM.Name -ResourceGroupName $VM.ResourceGroupName -Force -NoWait:$NoWait - } - if ($result -is [Microsoft.Azure.Commands.Compute.Models.PSComputeLongRunningOperation]) { - # Synchronous operation requested - if ($result.Status -eq "Succeeded") { - Add-LogMessage -Level Success "VM '$($VM.Name)' stopped." - } else { - Add-LogMessage -Level Fatal "Failed to stop VM '$($VM.Name)' [$($result.Status): $($result.Error)]" - } - } elseif ($result -is [Microsoft.Azure.Commands.Compute.Models.PSAzureOperationResponse]) { - # Asynchronous operation requested - if (-not $result.IsSuccessStatusCode) { - Add-LogMessage -Level Fatal "Request to stop VM '$($VM.Name)' failed [$($result.StatusCode): $($result.ReasonPhrase)]" - } else { - Add-LogMessage -Level Success "Request to stop VM '$($VM.Name)' accepted." - } - } else { - Add-LogMessage -Level Fatal "Unrecognised return type from operation: '$($result.GetType().Name)'." - } -} -Export-ModuleMember -Function Stop-VM - - -# Wait for cloud-init provisioning to finish -# ------------------------------------------ -function Wait-ForCloudInit { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of virtual machine to wait for")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group VM belongs to")] - [string]$ResourceGroupName - ) - # Poll VM to see whether it has finished running - Add-LogMessage -Level Info "Waiting for cloud-init provisioning to finish for $Name..." 
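# --- Illustrative usage sketch (annotation, not part of the deleted module) -----------
# Start-VM and Stop-VM above accept either a VM object from the pipeline or a
# name/resource-group pair, and -NoWait makes the call asynchronous. Names hypothetical.
Get-AzVM -Name "SRE-SRD-001" -ResourceGroupName "RG_SRE_COMPUTE" | Stop-VM -NoWait
Start-VM -Name "SRE-SRD-001" -ResourceGroupName "RG_SRE_COMPUTE" -ForceRestart
# ---------------------------------------------------------------------------------------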
- $progress = 0 - $statuses = @() - while (-not ($statuses.Contains("ProvisioningState/succeeded") -and ($statuses.Contains("PowerState/stopped") -or $statuses.Contains("PowerState/deallocated")))) { - try { - $statuses = (Get-AzVM -Name $Name -ResourceGroupName $ResourceGroupName -Status -ErrorAction Stop).Statuses.Code - } catch [Microsoft.Azure.Commands.Compute.Common.ComputeCloudException] { - Add-LogMessage -Level Fatal "Could not retrieve VM status while waiting for cloud-init to finish!" -Exception $_.Exception - } catch { - Add-LogMessage -Level Fatal "Unknown error of type $($_.Exception.GetType()) occurred!" -Exception $_.Exception - } - $progress = [math]::min(100, $progress + 1) - Write-Progress -Activity "Deployment status" -Status "$($statuses[0]) $($statuses[1])" -PercentComplete $progress - Start-Sleep 10 - } - Add-LogMessage -Level Success "Cloud-init provisioning is finished for $Name" -} -Export-ModuleMember -Function Wait-ForCloudInit diff --git a/deployment/common/AzureDataProtection.psm1 b/deployment/common/AzureDataProtection.psm1 deleted file mode 100644 index f3dc0c85f2..0000000000 --- a/deployment/common/AzureDataProtection.psm1 +++ /dev/null @@ -1,222 +0,0 @@ -Import-Module Az.DataProtection -ErrorAction Stop -Import-Module $PSScriptRoot/Logging -ErrorAction Stop - -$DataSourceMap = @{ - "blob" = "AzureBlob" - "disk" = "AzureDisk" -} - -# Deploy a data protection backup vault -# ------------------------------------- -function Deploy-DataProtectionBackupVault { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of backup resource group")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Name of data protection backup vault")] - [string]$VaultName, - [Parameter(Mandatory = $true, HelpMessage = "Location of data protection backup vault")] - [string]$Location - ) - # Check if vault exists - Add-LogMessage -Level Info "Ensuring that backup vault '$VaultName' exists..." - try { - $Vault = Get-AzDataProtectionBackupVault -ResourceGroupName $ResourceGroupName ` - -VaultName $VaultName ` - -ErrorAction Stop - Add-LogMessage -Level InfoSuccess "Backup vault '$VaultName' already exists" - } catch { - Add-LogMessage -Level Info "[ ] Creating backup vault '$VaultName'" - $storagesetting = New-AzDataProtectionBackupVaultStorageSettingObject -DataStoreType VaultStore -Type LocallyRedundant - # Create backup vault - # The SystemAssigned identity is necessary to give the backup vault - # appropriate permissions to backup resources. - $Vault = New-AzDataProtectionBackupVault -ResourceGroupName $ResourceGroupName ` - -VaultName $VaultName ` - -StorageSetting $storagesetting ` - -Location $Location ` - -IdentityType "SystemAssigned" - if ($?) 
{ - Add-LogMessage -Level Success "Successfully deployed backup vault $VaultName" - } else { - Add-LogMessage -Level Fatal "Failed to deploy backup vault $VaultName" - } - } - return $Vault -} -Export-ModuleMember -Function Deploy-DataProtectionBackupVault - - -# Deploy a data protection backup instance -# ---------------------------------------- -function Deploy-DataProtectionBackupInstance { - param( - [Parameter(Mandatory = $true, HelpMessage = "ID of the backup policy to apply")] - [string]$BackupPolicyId, - [Parameter(Mandatory = $true, HelpMessage = "Name of data protection backup vault resource group")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Name of data protection backup vault")] - [string]$VaultName, - [Parameter(Mandatory = $true, HelpMessage = "backup data source type")] - [ValidateScript({ $_ -in $DataSourceMap.Keys })] - [string]$DataSourceType, - [Parameter(Mandatory = $true, HelpMessage = "ID of the resource to enable backup on")] - [String]$DataSourceId, - [Parameter(Mandatory = $true, HelpMessage = "Location of the resource to enable backup on")] - [String]$DataSourceLocation, - [Parameter(Mandatory = $true, HelpMessage = "Name of the resource to enable backup on")] - [String]$DataSourceName - ) - Add-LogMessage -Level Info "Ensuring backup instance for '$DataSourceName' exists" - $instance = Get-AzDataProtectionBackupInstance -ResourceGroupName $ResourceGroupName -VaultName $VaultName -ErrorAction SilentlyContinue | Where-Object { $_.Name -like "$DataSourceName*" } - if ($instance) { - Add-LogMessage -Level InfoSuccess "Backup instance for '$DataSourceName' already exists" - } else { - $success = $False - $maxTries = 5 - for ($attempt = 1; $attempt -le $maxTries; $attempt++) { - try { - Add-LogMessage -Level Info "[ ] Creating backup instance for '$DataSourceName'" - $initialisation = Initialize-AzDataProtectionBackupInstance -DatasourceType $DataSourceMap[$DataSourceType] ` - -DatasourceLocation $DataSourceLocation ` - -PolicyId $BackupPolicyId ` - -DatasourceId $DataSourceId ` - -ErrorAction Stop - if ($DataSourceType -eq 'disk') { - # Set resource group to hold snapshots - $backup_rg_id = (Get-AzResourceGroup | Where-Object { $_.ResourceGroupName -eq $ResourceGroupName }).ResourceId - $initialisation.Property.PolicyInfo.PolicyParameter.DataStoreParametersList[0].ResourceGroupId = $backup_rg_id - } - $instance = New-AzDataProtectionBackupInstance -ResourceGroupName $ResourceGroupName ` - -VaultName $VaultName ` - -BackupInstance $initialisation ` - -ErrorAction Stop - Add-LogMessage -Level Success "Successfully created backup instance for '$DataSourceName'" - $success = $True - break - } catch { - Add-LogMessage -Level Info "[x] Creating backup instance for '$DataSourceName' failed. Attempt [$attempt/$maxTries]." 
- $ErrorMessage = $_.Exception - } - } - # Throw an exception if backup instance could not be created - if (-not $success) { - Add-LogMessage -Level Fatal "Failed to create backup instance for '$DataSourceName'!`n${ErrorMessage}" - } - } - return $instance -} -Export-ModuleMember -Function Deploy-DataProtectionBackupInstance - -# Deploy a data protection backup policy -# Currently only supports hard-coded policies -# ------------------------------------------- -function Deploy-DataProtectionBackupPolicy { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of backup resource group")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Name of data protection backup vault")] - [string]$VaultName, - [Parameter(Mandatory = $true, HelpMessage = "Name of data protection backup policy")] - [string]$PolicyName, - [Parameter(Mandatory = $true, HelpMessage = "backup data source type")] - [ValidateScript({ $_ -in $DataSourceMap.Keys })] - [string]$DataSourceType - ) - - Add-LogMessage -Level Info "Ensuring backup policy '$PolicyName' exists" - try { - $Policy = Get-AzDataProtectionBackupPolicy -Name $PolicyName ` - -ResourceGroupName $ResourceGroupName ` - -VaultName $VaultName ` - -ErrorAction Stop - Add-LogMessage -Level InfoSuccess "Backup policy '$PolicyName' already exists" - } catch { - Add-LogMessage -Level Info "[ ] Creating backup policy '$PolicyName'" - # Get default policy template for the data source type - $Template = Get-AzDataProtectionPolicyTemplate -DatasourceType $DataSourceMap[$DataSourceType] - - # Modify default policy template - if ($DataSourceType -eq 'disk') { - # Add life cycle for retention of weekly snapshots for 12 weeks - # This is in addition to the retention of of snapshots for 7 days - $Lifecycle = New-AzDataProtectionRetentionLifeCycleClientObject -SourceDataStore OperationalStore ` - -SourceRetentionDurationType Weeks ` - -SourceRetentionDurationCount 12 - Edit-AzDataProtectionPolicyRetentionRuleClientObject -Policy $Template ` - -Name Weekly ` - -LifeCycles $Lifecycle ` - -IsDefault $false - $Criteria = New-AzDataProtectionPolicyTagCriteriaClientObject -AbsoluteCriteria FirstOfWeek - Edit-AzDataProtectionPolicyTagClientObject -Policy $Template ` - -Name Weekly ` - -Criteria $Criteria - # Daily backup schedule at 02:00:00 (In the system's time zone) - $Schedule = New-AzDataProtectionPolicyTriggerScheduleClientObject -ScheduleDays (Get-Date -Hour 2 -Minute 0 -Second 0) ` - -IntervalType Daily ` - -IntervalCount 1 - Edit-AzDataProtectionPolicyTriggerClientObject -Policy $Template ` - -Schedule $Schedule - } elseif ($DataSourcetype -eq 'blob') { - Edit-AzDataProtectionPolicyTriggerClientObject -Policy $Template -RemoveSchedule - # Change default retention period to 12 weeks - $lifeCycleOperationalTier = New-AzDataProtectionRetentionLifeCycleClientObject -SourceDataStore OperationalStore ` - -SourceRetentionDurationType Weeks ` - -SourceRetentionDurationCount 12 - Edit-AzDataProtectionPolicyRetentionRuleClientObject -Policy $Template -Name Default -LifeCycles $lifeCycleOperationalTier -IsDefault $true -OverwriteLifeCycle $true - } - $Policy = New-AzDataProtectionBackupPolicy -ResourceGroupName $ResourceGroupName ` - -VaultName $VaultName ` - -Name $PolicyName ` - -Policy $Template - if ($?) 
{ - Add-LogMessage -Level Success "Successfully deployed backup policy $PolicyName" - } else { - Add-LogMessage -Level Fatal "Failed to deploy backup policy $PolicyName" - } - } - return $Policy -} -Export-ModuleMember -Function Deploy-DataProtectionBackupPolicy - - -# Remove all data protection backup instances -# ------------------------------------------- -function Remove-DataProtectionBackupInstances { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of data protection backup vault resource group")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Name of data protection backup vault")] - [string]$VaultName - ) - try { - $Instances = Get-AzDataProtectionBackupInstance -ResourceGroupName $ResourceGroupName -VaultName $VaultName -ErrorAction SilentlyContinue - if ($Instances) { - Add-LogMessage -Level Info "Attempting to remove backup instances from vault '$VaultName' in resource group '$ResourceGroupName'..." - $null = $Instances | Remove-AzDataProtectionBackupInstance -ErrorAction Stop - Add-LogMessage -Level Success "Removed backup instances from vault '$VaultName' in resource group '$ResourceGroupName'" - } - } catch { - Add-LogMessage -Level Fatal "Failed to remove backup instances from vault '$VaultName' in resource group '$ResourceGroupName'!" - } -} -Export-ModuleMember -Function Remove-DataProtectionBackupInstances - -# Remove all disk snapshots in backup resource group -# -------------------------------------------------- -function Remove-DataProtectionBackupDiskSnapshots { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of data protection backup vault resource group")] - [string]$ResourceGroupName - ) - try { - $disks = Get-AzSnapshot -ResourceGroupName $ResourceGroupName -ErrorAction SilentlyContinue - if ($disks) { - Add-LogMessage -Level Info "Attempting to remove backup disk snapshots from resource group '$ResourceGroupName'..." - $null = $disks | Remove-AzSnapshot -Force -ErrorAction Stop - Add-LogMessage -Level Success "Removed backup disk snapshots from resource group '$ResourceGroupName'" - } - } catch { - Add-LogMessage -Level Fatal "Failed to remove backup disk snapshots from resource group '$ResourceGroupName'!" 
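# --- Illustrative usage sketch (annotation, not part of the deleted module) -----------
# Wiring the vault, policy and instance helpers above together for blob backups. The
# resource group, vault, policy and storage account names below are hypothetical.
$vault          = Deploy-DataProtectionBackupVault -ResourceGroupName "RG_SHM_BACKUP" -VaultName "shm-backup-vault" -Location "uksouth"
$policy         = Deploy-DataProtectionBackupPolicy -ResourceGroupName "RG_SHM_BACKUP" -VaultName $vault.Name -PolicyName "shm-blob-policy" -DataSourceType "blob"
$storageAccount = Get-AzStorageAccount -ResourceGroupName "RG_SHM_DATA" -Name "shmdata"
$null = Deploy-DataProtectionBackupInstance -BackupPolicyId $policy.Id `
                                            -ResourceGroupName "RG_SHM_BACKUP" `
                                            -VaultName $vault.Name `
                                            -DataSourceType "blob" `
                                            -DataSourceId $storageAccount.Id `
                                            -DataSourceLocation "uksouth" `
                                            -DataSourceName $storageAccount.StorageAccountName
# ---------------------------------------------------------------------------------------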
- } -} -Export-ModuleMember -Function Remove-DataProtectionBackupDiskSnapshots diff --git a/deployment/common/AzureDns.psm1 b/deployment/common/AzureDns.psm1 deleted file mode 100644 index 161b2349c9..0000000000 --- a/deployment/common/AzureDns.psm1 +++ /dev/null @@ -1,221 +0,0 @@ -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.Dns -ErrorAction Stop -Import-Module $PSScriptRoot/Logging -ErrorAction Stop - - -function Deploy-DnsRecord { - param( - [Parameter(Mandatory = $true, HelpMessage = "DNS records")] - [Microsoft.Azure.Commands.Dns.DnsRecordBase[]]$DnsRecords, - [Parameter(Mandatory = $true, HelpMessage = "Name of record")] - [string]$RecordName, - [Parameter(Mandatory = $true, HelpMessage = "Type of record")] - [string]$RecordType, - [Parameter(Mandatory = $true, HelpMessage = "Name of DNS resource group")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Name of DNS subscription")] - [string]$SubscriptionName, - [Parameter(Mandatory = $true, HelpMessage = "TTL seconds for the DNS records")] - [uint]$TtlSeconds, - [Parameter(Mandatory = $true, HelpMessage = "Name of DNS zone to add the records to")] - [string]$ZoneName - ) - $originalContext = Get-AzContext - try { - $null = Set-AzContext -Subscription $SubscriptionName -ErrorAction Stop - Remove-AzDnsRecordSet -Name $RecordName -RecordType $RecordType -ZoneName $ZoneName -ResourceGroupName $ResourceGroupName - $null = New-AzDnsRecordSet -DnsRecords $DnsRecords -Name $RecordName -RecordType $RecordType -ResourceGroupName $ResourceGroupName -Ttl $TtlSeconds -ZoneName $ZoneName -ErrorAction Stop - } catch { - $null = Set-AzContext -Context $originalContext -ErrorAction Stop - throw - } finally { - $null = Set-AzContext -Context $originalContext -ErrorAction Stop - } - return -} -Export-ModuleMember -Function Deploy-DnsRecord - - -# Add A (and optionally CNAME) DNS records -# ---------------------------------------- -function Deploy-DnsRecordCollection { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of DNS subscription")] - [string]$SubscriptionName, - [Parameter(Mandatory = $true, HelpMessage = "Name of DNS resource group")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Name of DNS zone to add the records to")] - [string]$ZoneName, - [Parameter(Mandatory = $true, HelpMessage = "Public IP address for this record to point to")] - [string]$PublicIpAddress, - [Parameter(Mandatory = $false, HelpMessage = "Name of 'A' record")] - [string]$RecordNameA = "@", - [Parameter(Mandatory = $false, HelpMessage = "Name of certificate provider for CAA record")] - [string]$RecordNameCAA = $null, - [Parameter(Mandatory = $false, HelpMessage = "Name of 'CNAME' record (if none is provided then no CNAME redirect will be set up)")] - [string]$RecordNameCName = $null, - [Parameter(Mandatory = $false, HelpMessage = "TTL seconds for the DNS records")] - [uint]$TtlSeconds = 30 - ) - Add-LogMessage -Level Info "Adding DNS records for DNS zone '$ZoneName'..." 
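# --- Illustrative usage sketch (annotation, not part of the deleted module) -----------
# The low-level Deploy-DnsRecord helper above removes any existing record set and then
# recreates it from New-AzDnsRecordConfig objects, e.g. a TXT record. The zone,
# subscription and token values below are hypothetical.
Deploy-DnsRecord -DnsRecords (New-AzDnsRecordConfig -Value "verification-token") `
                 -RecordName "@" `
                 -RecordType "TXT" `
                 -ResourceGroupName "RG_SHM_DNS" `
                 -SubscriptionName "SHM DNS Subscription" `
                 -TtlSeconds 30 `
                 -ZoneName "sre.example.org"
# ---------------------------------------------------------------------------------------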
- try { - # Set the A record - Add-LogMessage -Level Info "[ ] Setting 'A' record to '$PublicIpAddress' for DNS zone '$ZoneName'" - Deploy-DnsRecord -DnsRecords (New-AzDnsRecordConfig -Ipv4Address $PublicIpAddress) -RecordName $RecordNameA -RecordType "A" -ResourceGroupName $ResourceGroupName -Subscription $SubscriptionName -TtlSeconds $TtlSeconds -ZoneName $ZoneName - Add-LogMessage -Level Success "Set 'A' record to '$PublicIpAddress' for DNS zone '$ZoneName'" - # Set the CNAME record - if ($RecordNameCName) { - Add-LogMessage -Level Info "[ ] Setting CNAME record '$RecordNameCName' to point to the 'A' record for DNS zone '$ZoneName'" - Deploy-DnsRecord -DnsRecords (New-AzDnsRecordConfig -Cname $ZoneName) -RecordName $RecordNameCName -RecordType "CNAME" -ResourceGroupName $ResourceGroupName -Subscription $SubscriptionName -TtlSeconds $TtlSeconds -ZoneName $ZoneName - Add-LogMessage -Level Success "Set 'CNAME' record to '$RecordNameCName' to point to the 'A' record for DNS zone '$ZoneName'" - } - # Set the CAA record - if ($RecordNameCAA) { - Add-LogMessage -Level Info "[ ] Setting CAA record for $ZoneName to state that certificates will be provided by $RecordNameCAA" - Deploy-DnsRecord -DnsRecords (New-AzDnsRecordConfig -CaaFlags 0 -CaaTag "issue" -CaaValue $RecordNameCAA) -RecordName "@" -RecordType "CAA" -ResourceGroupName $ResourceGroupName -Subscription $SubscriptionName -TtlSeconds $TtlSeconds -ZoneName $ZoneName - Add-LogMessage -Level Success "Set 'CAA' record for '$ZoneName' to state that certificates will be provided by $RecordNameCAA" - } - } catch { - Add-LogMessage -Level Fatal "Failed to add DNS records for DNS zone '$ZoneName'!" -Exception $_.Exception - } -} -Export-ModuleMember -Function Deploy-DnsRecordCollection - - -# Get NS Records -# -------------- -function Get-NSRecords { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of record set")] - [string]$RecordSetName, - [Parameter(Mandatory = $true, HelpMessage = "Name of DNS zone")] - [string]$DnsZoneName, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to deploy into")] - [string]$ResourceGroupName - ) - Add-LogMessage -Level Info "Reading NS records '$($RecordSetName)' for DNS Zone '$($DnsZoneName)'..." - $recordSet = Get-AzDnsRecordSet -ZoneName $DnsZoneName -ResourceGroupName $ResourceGroupName -Name $RecordSetName -RecordType "NS" - return $recordSet.Records -} -Export-ModuleMember -Function Get-NSRecords - - -# Create DNS Zone if it does not exist -# ------------------------------------ -function New-DNSZone { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of DNS zone to deploy")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to deploy into")] - [string]$ResourceGroupName - ) - Add-LogMessage -Level Info "Ensuring that DNS zone '$($Name)' exists..." - $null = Get-AzDnsZone -Name $Name -ResourceGroupName $ResourceGroupName -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($notExists) { - Add-LogMessage -Level Info "[ ] Creating DNS Zone '$Name'" - $null = New-AzDnsZone -Name $Name -ResourceGroupName $ResourceGroupName - if ($?) { - Add-LogMessage -Level Success "Created DNS Zone '$Name'" - } else { - Add-LogMessage -Level Fatal "Failed to create DNS Zone '$Name'!" 
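# --- Illustrative usage sketch (annotation, not part of the deleted module) -----------
# Pointing a zone at a public IP with the Deploy-DnsRecordCollection helper above,
# adding optional CNAME and CAA records. Zone, subscription, CNAME and IP values are
# hypothetical examples.
Deploy-DnsRecordCollection -SubscriptionName "SHM DNS Subscription" `
                           -ResourceGroupName "RG_SHM_DNS" `
                           -ZoneName "sre.example.org" `
                           -PublicIpAddress "203.0.113.10" `
                           -RecordNameCName "rds" `
                           -RecordNameCAA "letsencrypt.org" `
                           -TtlSeconds 30
# ---------------------------------------------------------------------------------------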
- } - } else { - Add-LogMessage -Level InfoSuccess "DNS Zone '$Name' already exists" - } -} -Export-ModuleMember -Function New-DNSZone - - -# Remove a DNS record if it exists -# -------------------------------- -function Remove-DnsRecord { - param( - [Parameter(Mandatory = $false, HelpMessage = "Name of 'A' record")] - [string]$RecordName, - [Parameter(Mandatory = $false, HelpMessage = "Name of 'A' record")] - [string]$RecordType, - [Parameter(Mandatory = $true, HelpMessage = "Name of DNS resource group")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Name of DNS subscription")] - [string]$SubscriptionName, - [Parameter(Mandatory = $true, HelpMessage = "Name of DNS zone to add the records to")] - [string]$ZoneName - ) - $originalContext = Get-AzContext - try { - Add-LogMessage -Level Info "[ ] Removing '$RecordName' $RecordType record from DNS zone $ZoneName" - $null = Set-AzContext -SubscriptionId $SubscriptionName -ErrorAction Stop - Remove-AzDnsRecordSet -Name $RecordName -RecordType $RecordType -ZoneName $ZoneName -ResourceGroupName $ResourceGroupName -ErrorAction Stop - Add-LogMessage -Level Success "DNS record removal succeeded" - } catch { - $null = Set-AzContext -Context $originalContext -ErrorAction Stop - Add-LogMessage -Level Fatal "DNS record removal failed!" -Exception $_.Exception - } finally { - $null = Set-AzContext -Context $originalContext -ErrorAction Stop - } -} -Export-ModuleMember -Function Remove-DnsRecord - - -# Add NS Record Set to DNS Zone if it does not already exist -# --------------------------------------------------------- -function Set-DnsZoneAndParentNSRecords { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of DNS zone to create")] - [string]$DnsZoneName, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group holding DNS zones")] - [string]$ResourceGroupName - ) - # Get subdomain and parent domain - $subdomain = $DnsZoneName.Split('.')[0] - $parentDnsZoneName = $DnsZoneName -replace "$subdomain.", "" - - # Create DNS Zone - New-DNSZone -Name $DnsZoneName -ResourceGroupName $ResourceGroupName - - # Get NS records from the new DNS Zone - Add-LogMessage -Level Info "Get NS records from the new DNS Zone..." - $nsRecords = Get-NSRecords -RecordSetName "@" -DnsZoneName $DnsZoneName -ResourceGroupName $ResourceGroupName - - # Check if parent DNS Zone exists in same subscription and resource group - $null = Get-AzDnsZone -Name $parentDnsZoneName -ResourceGroupName $ResourceGroupName -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($notExists) { - Add-LogMessage -Level Info "No existing DNS Zone was found for '$parentDnsZoneName' in resource group '$ResourceGroupName'." - Add-LogMessage -Level Info "You need to add the following NS records to the parent DNS system for '$parentDnsZoneName': '$nsRecords'" - } else { - # Add NS records to the parent DNS Zone - Add-LogMessage -Level Info "Add NS records to the parent DNS Zone..." 
- Set-NSRecords -RecordSetName $subdomain -DnsZoneName $parentDnsZoneName -ResourceGroupName $ResourceGroupName -NsRecords $nsRecords - } -} -Export-ModuleMember -Function Set-DnsZoneAndParentNSRecords - - -# Add NS Record Set to DNS Zone if it doesn't already exist -# --------------------------------------------------------- -function Set-NSRecords { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of record set")] - [string]$RecordSetName, - [Parameter(Mandatory = $true, HelpMessage = "Name of DNS zone")] - [string]$DnsZoneName, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to deploy into")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "NS records to add")] - $NsRecords - ) - $null = Get-AzDnsRecordSet -ResourceGroupName $ResourceGroupName -ZoneName $DnsZoneName -Name $RecordSetName -RecordType NS -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($notExists) { - Add-LogMessage -Level Info "Creating new Record Set '$($RecordSetName)' in DNS Zone '$($DnsZoneName)' with NS records '$($nsRecords)' to ..." - $null = New-AzDnsRecordSet -Name $RecordSetName -ZoneName $DnsZoneName -ResourceGroupName $ResourceGroupName -Ttl 3600 -RecordType NS -DnsRecords $NsRecords - if ($?) { - Add-LogMessage -Level Success "Created DNS Record Set '$RecordSetName'" - } else { - Add-LogMessage -Level Fatal "Failed to create DNS Record Set '$RecordSetName'!" - } - } else { - # It's not straightforward to modify existing record sets idempotently so if the set already exists we do nothing - Add-LogMessage -Level InfoSuccess "DNS record set '$RecordSetName' already exists. Will not update!" - } -} -Export-ModuleMember -Function Set-NSRecords diff --git a/deployment/common/AzureKeyVault.psm1 b/deployment/common/AzureKeyVault.psm1 deleted file mode 100644 index 7f3076d8cb..0000000000 --- a/deployment/common/AzureKeyVault.psm1 +++ /dev/null @@ -1,195 +0,0 @@ -Import-Module Az.KeyVault -ErrorAction Stop -Import-Module $PSScriptRoot/Cryptography -ErrorAction Stop -Import-Module $PSScriptRoot/DataStructures -ErrorAction Stop -Import-Module $PSScriptRoot/Logging -ErrorAction Stop - - -# Create a key vault if it does not exist -# --------------------------------------- -function Deploy-KeyVault { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of disk to deploy")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to deploy into")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Location of resource group to deploy")] - [string]$Location - ) - Add-LogMessage -Level Info "Ensuring that key vault '$Name' exists..." - $keyVault = Get-AzKeyVault -VaultName $Name -ResourceGroupName $ResourceGroupName -ErrorAction SilentlyContinue -WarningAction SilentlyContinue - if ($null -eq $keyVault) { - # Purge any existing soft-deleted key vault - foreach ($existingLocation in (Get-AzLocation | ForEach-Object { $_.Location })) { - try { - if (Get-AzKeyVault -VaultName $Name -Location $existingLocation -InRemovedState -ErrorAction Stop -WarningAction SilentlyContinue) { - Add-LogMessage -Level Info "Purging a soft-deleted key vault '$Name' in $existingLocation" - Remove-AzKeyVault -VaultName $Name -Location $existingLocation -InRemovedState -Force -WarningAction SilentlyContinue | Out-Null - if ($?) { - Add-LogMessage -Level Success "Purged key vault '$Name'" - } else { - Add-LogMessage -Level Fatal "Failed to purge key vault '$Name'!" 
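For context, the DNS helpers being removed above were normally driven through Deploy-DnsRecordCollection rather than called individually. A minimal usage sketch, assuming the module has been imported from its old location and using hypothetical subscription, zone and address values:

    # Hypothetical values throughout; module path as it existed before this removal
    Import-Module ./deployment/common/AzureDns.psm1 -ErrorAction Stop
    Deploy-DnsRecordCollection -SubscriptionName "Example DNS Subscription" `
                               -ResourceGroupName "RG_EXAMPLE_DNS" `
                               -ZoneName "sandbox.example.org" `
                               -PublicIpAddress "203.0.113.10" `
                               -RecordNameCName "www" `
                               -RecordNameCAA "letsencrypt.org" `
                               -TtlSeconds 30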
- } - } - } catch [Microsoft.Rest.Azure.CloudException] { - continue # Running Get-AzKeyVault on a location which does not support soft-deleted key vaults causes an error which we catch here - } - } - # Create a new key vault - Add-LogMessage -Level Info "[ ] Creating key vault '$Name'" - $keyVault = New-AzKeyVault -Name $Name -ResourceGroupName $ResourceGroupName -Location $Location -WarningAction SilentlyContinue - if ($?) { - Add-LogMessage -Level Success "Created key vault '$Name'" - } else { - Add-LogMessage -Level Fatal "Failed to create key vault '$Name'!" - } - } else { - Add-LogMessage -Level InfoSuccess "Key vault '$Name' already exists" - } - return $keyVault -} -Export-ModuleMember -Function Deploy-KeyVault - - -# Purge a secret from the keyvault -# -------------------------------- -function Remove-AndPurgeKeyVaultSecret { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of secret")] - [ValidateNotNullOrEmpty()] - [string]$SecretName, - [Parameter(Mandatory = $true, HelpMessage = "Name of key vault this secret belongs to")] - [ValidateNotNullOrEmpty()] - [string]$VaultName - ) - Remove-AzKeyVaultSecret -VaultName $VaultName -Name $SecretName -Force -ErrorAction Stop - # Wait up to five minutes for the secret to show up as purgeable - for ($i = 0; $i -lt 30; $i++) { - if (Get-AzKeyVaultSecret -VaultName $VaultName -Name $SecretName -InRemovedState) { - Remove-AzKeyVaultSecret -VaultName $VaultName -Name $SecretName -InRemovedState -Force -ErrorAction Stop - break - } - Start-Sleep -Seconds 10 - } -} -Export-ModuleMember -Function Remove-AndPurgeKeyVaultSecret - - -# Return a certificate with a valid private key if it exists, otherwise remove and purge any certificate with this name -# --------------------------------------------------------------------------------------------------------------------- -function Resolve-KeyVaultPrivateKeyCertificate { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of secret")] - [ValidateNotNullOrEmpty()] - [string]$CertificateName, - [Parameter(Mandatory = $true, HelpMessage = "Name of key vault this secret belongs to")] - [ValidateNotNullOrEmpty()] - [string]$VaultName - ) - # Return existing certificate if it exists and has a private key - $existingCert = Get-AzKeyVaultCertificate -VaultName $VaultName -Name $CertificateName - $privateKey = Get-AzKeyVaultSecret -VaultName $VaultName -Name $CertificateName -AsPlainText - if ($existingCert -and $privateKey) { - Add-LogMessage -Level InfoSuccess "Found existing certificate with private key" - return $existingCert - } - # Remove any existing certificate with this name - Remove-AzKeyVaultCertificate -VaultName $VaultName -Name $CertificateName -Force -ErrorAction SilentlyContinue - Wait-For -Target "removal of old certificate to complete" -Seconds 30 - # Purge any removed certificate with this name - $removedCert = Get-AzKeyVaultCertificate -VaultName $VaultName -Name $CertificateName -InRemovedState - if ($removedCert) { - Remove-AzKeyVaultCertificate -VaultName $VaultName -Name $CertificateName -InRemovedState -Force -ErrorAction SilentlyContinue - Wait-For -Target "pruning of old certificate to complete" -Seconds 30 - } - return $false -} -Export-ModuleMember -Function Resolve-KeyVaultPrivateKeyCertificate - - -# Ensure that a password is in the keyvault -# ----------------------------------------- -function Resolve-KeyVaultSecret { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of secret")] - [ValidateNotNullOrEmpty()] - [string]$SecretName, - 
[Parameter(Mandatory = $true, HelpMessage = "Name of key vault this secret belongs to")] - [ValidateNotNullOrEmpty()] - [string]$VaultName, - [Parameter(Mandatory = $false, HelpMessage = "Default value for this secret")] - [string]$DefaultValue, - [Parameter(Mandatory = $false, HelpMessage = "Default number of random characters to be used when initialising this secret")] - [string]$DefaultLength, - [Parameter(Mandatory = $false, HelpMessage = "Overwrite any existing secret with this name")] - [switch]$ForceOverwrite, - [Parameter(Mandatory = $false, HelpMessage = "Retrieve secret as plaintext instead of as a secure string")] - [switch]$AsPlaintext - ) - # Create a new secret if one does not exist in the key vault or if we are forcing an overwrite - if ($ForceOverwrite -or (-not (Get-AzKeyVaultSecret -Name $SecretName -VaultName $VaultName))) { - # If no default is provided then we cannot generate a secret - if ((-not $DefaultValue) -and (-not $DefaultLength)) { - Add-LogMessage -Level Fatal "Secret '$SecretName does not exist and no default value or length was provided!" - } - # If both defaults are provided then we do not know which to use - if ($DefaultValue -and $DefaultLength) { - Add-LogMessage -Level Fatal "Both a default value and a default length were provided. Please only use one of these options!" - } - # Generate a new password if there is no default value - if (-not $DefaultValue) { - $DefaultValue = $(New-Password -Length $DefaultLength) - } - # Store the password in the keyvault - try { - $null = Undo-AzKeyVaultSecretRemoval -Name $SecretName -VaultName $VaultName -ErrorAction SilentlyContinue # if the key has been soft-deleted we need to restore it before doing anything else - Start-Sleep 10 - $null = Set-AzKeyVaultSecret -Name $SecretName -VaultName $VaultName -SecretValue (ConvertTo-SecureString $DefaultValue -AsPlainText -Force) -ErrorAction Stop - } catch [Microsoft.Azure.KeyVault.Models.KeyVaultErrorException] { - Add-LogMessage -Level Fatal "Failed to create '$SecretName' in key vault '$VaultName'" -Exception $_.Exception - } - } - # Retrieve the secret from the key vault and return its value - $secret = Get-AzKeyVaultSecret -Name $SecretName -VaultName $VaultName - if ($AsPlaintext) { return $secret.SecretValue | ConvertFrom-SecureString -AsPlainText } - return $secret.SecretValue -} -Export-ModuleMember -Function Resolve-KeyVaultSecret - - -# Set key vault permissions to the group and remove the user who deployed it -# -------------------------------------------------------------------------- -function Set-KeyVaultPermissions { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of key vault to set the permissions on")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Name of group to give permissions to")] - [string]$GroupName - ) - Add-LogMessage -Level Info "Giving group '$GroupName' access to key vault '$Name'..." - # We need to catch both an exception from `Get-AzADGroup` and a non-existent group which returns $null - try { - $securityGroupId = (Get-AzADGroup -DisplayName $GroupName).Id | Select-Object -First 1 - } catch [Microsoft.Azure.Commands.ActiveDirectory.GetAzureADGroupCommand] { - $securityGroupId = $null - } - if ($null -eq $securityGroupId) { - Add-LogMessage -Level Fatal "Could not identify an Azure security group called $GroupName!" 
- } - Set-AzKeyVaultAccessPolicy -VaultName $Name ` - -ObjectId $securityGroupId ` - -PermissionsToKeys Get, List, Update, Create, Import, Delete, Backup, Restore, Recover, Purge ` - -PermissionsToSecrets Get, List, Set, Delete, Recover, Backup, Restore, Purge ` - -PermissionsToCertificates Get, List, Delete, Create, Import, Update, Managecontacts, Getissuers, Listissuers, Setissuers, Deleteissuers, Manageissuers, Recover, Backup, Restore, Purge ` - -WarningAction SilentlyContinue - $success = $? - foreach ($accessPolicy in (Get-AzKeyVault $Name -WarningAction SilentlyContinue).AccessPolicies | Where-Object { $_.ObjectId -ne $securityGroupId }) { - Remove-AzKeyVaultAccessPolicy -VaultName $Name -ObjectId $accessPolicy.ObjectId -WarningAction SilentlyContinue - $success = $success -and $? - } - if ($success) { - Add-LogMessage -Level Success "Set correct access policies for key vault '$Name'" - } else { - Add-LogMessage -Level Fatal "Failed to set correct access policies for key vault '$Name'!" - } -} -Export-ModuleMember -Function Set-KeyVaultPermissions diff --git a/deployment/common/AzureMonitor.psm1 b/deployment/common/AzureMonitor.psm1 deleted file mode 100644 index ec7fc67e41..0000000000 --- a/deployment/common/AzureMonitor.psm1 +++ /dev/null @@ -1,80 +0,0 @@ -Import-Module Az.Monitor -ErrorAction Stop -Import-Module $PSScriptRoot/Logging -ErrorAction Stop - - -# Create an Azure Monitor Private Link Scope -# ------------------------------------------ -function Deploy-MonitorPrivateLinkScope { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of private link scope to deploy")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to deploy into")] - [string]$ResourceGroupName - ) - Add-LogMessage -Level Info "Ensuring that private link scope '$Name' exists..." - $link = Get-AzInsightsPrivateLinkScope -Name $Name -ResourceGroupName $ResourceGroupName -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($notExists) { - Add-LogMessage -Level Info "[ ] Creating private link scope '$Name'" - $link = New-AzInsightsPrivateLinkScope -Location "Global" -ResourceGroupName $ResourceGroupName -Name $Name - if ($?) { - Add-LogMessage -Level Success "Created private link scope '$Name'" - } else { - Add-LogMessage -Level Fatal "Failed to create private link scope '$Name'!" - } - } else { - Add-LogMessage -Level InfoSuccess "Private link scope '$Name' already exists" - } - return $link -} -Export-ModuleMember -Function Deploy-MonitorPrivateLinkScope - - -# Connect a log workspace to a private link -# ----------------------------------------- -function Connect-PrivateLinkToLogWorkspace { - param( - [Parameter(Mandatory = $true, HelpMessage = "Log analytics workspace to connect")] - [Microsoft.Azure.Management.Internal.Network.Common.IOperationalInsightWorkspace]$LogAnalyticsWorkspace, - [Parameter(Mandatory = $true, HelpMessage = "Name of private link scope to deploy")] - [Microsoft.Azure.Commands.Insights.OutputClasses.PSMonitorPrivateLinkScope]$PrivateLinkScope - ) - Add-LogMessage -Level Info "Ensuring that log analytics workspace '$($LogAnalyticsWorkspace.Name)' is connected to private link '$($PrivateLinkScope.Name)'..." 
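For context, the key vault helpers being removed above were designed to be chained so that a vault, its access policy and its secrets could all be created idempotently. A sketch under hypothetical names; exactly one of -DefaultValue or -DefaultLength should be supplied when the secret may not yet exist:

    # Hypothetical vault, group and secret names
    $null = Deploy-KeyVault -Name "kv-example-secrets" -ResourceGroupName "RG_EXAMPLE" -Location "uksouth"
    Set-KeyVaultPermissions -Name "kv-example-secrets" -GroupName "Example Administrators"
    # Create-or-read a 20-character random password, returned as plaintext
    $adminPassword = Resolve-KeyVaultSecret -VaultName "kv-example-secrets" `
                                            -SecretName "example-admin-password" `
                                            -DefaultLength 20 `
                                            -AsPlaintext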
- $ResourceGroupName = (Get-AzResource | Where-Object { $_.Name -eq $PrivateLinkScope.Name }).ResourceGroupName - $resource = Get-AzInsightsPrivateLinkScopedResource -ScopeName $PrivateLinkScope.Name -ResourceGroupName $ResourceGroupName | Where-Object { $_.LinkedResourceId -eq $LogAnalyticsWorkspace.ResourceId } #| Select-Object -First 1 -ErrorAction Stop - if ($resource.Count -gt 1) { $resource = $resource[0] } # Note. Select-Object on the previous command causes a PipelineStoppedException - if (-not $resource) { - Add-LogMessage -Level Info "[ ] Connecting log analytics workspace '$($LogAnalyticsWorkspace.Name)' to private link '$($PrivateLinkScope.Name)'" - $resource = New-AzInsightsPrivateLinkScopedResource -LinkedResourceId $LogAnalyticsWorkspace.ResourceId -ResourceGroupName $ResourceGroupName -ScopeName $PrivateLinkScope.Name -Name "scoped-link-$($LogAnalyticsWorkspace.Name)" - if ($?) { - Add-LogMessage -Level Success "Connected log analytics workspace '$($LogAnalyticsWorkspace.Name)' to private link '$($PrivateLinkScope.Name)'" - } else { - Add-LogMessage -Level Fatal "Failed to connect log analytics workspace '$($LogAnalyticsWorkspace.Name)' to private link '$($PrivateLinkScope.Name)'!" - } - } else { - Add-LogMessage -Level InfoSuccess "Log analytics workspace '$($LogAnalyticsWorkspace.Name)' is already connected to private link '$($PrivateLinkScope.Name)'" - } - return $resource -} -Export-ModuleMember -Function Connect-PrivateLinkToLogWorkspace - - -# Connect resource to logging workspace -# ------------------------------------- -function Set-LogAnalyticsDiagnostics { - param( - [Parameter(Mandatory = $true, HelpMessage = "Resource to set diagnostics on")] - [string]$ResourceId, - [Parameter(Mandatory = $true, HelpMessage = "Log analytics workspace to connect")] - [string]$ResourceName, - [Parameter(Mandatory = $true, HelpMessage = "Log analytics workspace to store the diagnostics")] - [string]$WorkspaceId - ) - Add-LogMessage -Level Info "Enable logging for $ResourceName to log analytics workspace" - $null = New-AzDiagnosticSetting -Name "LogToWorkspace" -ResourceId $ResourceId -WorkspaceId $WorkspaceId - if ($?) { - Add-LogMessage -Level Success "Enabled logging for $ResourceName to log analytics workspace" - } else { - Add-LogMessage -Level Fatal "Failed to enable logging for $ResourceName to log analytics workspace!" 
- } -} -Export-ModuleMember -Function Set-LogAnalyticsDiagnostics diff --git a/deployment/common/AzureNetwork.psm1 b/deployment/common/AzureNetwork.psm1 deleted file mode 100644 index be946421f4..0000000000 --- a/deployment/common/AzureNetwork.psm1 +++ /dev/null @@ -1,1119 +0,0 @@ -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.Compute -ErrorAction Stop -Import-Module Az.Network -ErrorAction Stop -Import-Module $PSScriptRoot/DataStructures -ErrorAction Stop -Import-Module $PSScriptRoot/Logging -ErrorAction Stop - - -# Create network security group rule if it does not exist -# ------------------------------------------------------- -function Add-NetworkSecurityGroupRule { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of network security group rule to deploy")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "A NetworkSecurityGroup object to apply this rule to")] - [Microsoft.Azure.Commands.Network.Models.PSNetworkSecurityGroup]$NetworkSecurityGroup, - [Parameter(Mandatory = $true, HelpMessage = "A description of the network security rule")] - [string]$Description, - [Parameter(Mandatory = $true, HelpMessage = "Specifies the priority of a rule configuration")] - [Uint32]$Priority, - [Parameter(Mandatory = $true, HelpMessage = "Specifies whether a rule is evaluated on incoming or outgoing traffic")] - [string]$Direction, - [Parameter(Mandatory = $true, HelpMessage = "Specifies whether network traffic is allowed or denied")] - [string]$Access, - [Parameter(Mandatory = $true, HelpMessage = "Specifies the network protocol that a rule configuration applies to")] - [string]$Protocol, - [Parameter(Mandatory = $true, HelpMessage = "Source addresses. One or more of: a CIDR, an IP address range, a wildcard or an Azure tag (eg. VirtualNetwork)")] - [string[]]$SourceAddressPrefix, - [Parameter(Mandatory = $true, HelpMessage = "Source port or range. One or more of: an integer, a range of integers or a wildcard")] - [string[]]$SourcePortRange, - [Parameter(Mandatory = $true, HelpMessage = "Destination addresses. One or more of: a CIDR, an IP address range, a wildcard or an Azure tag (eg. VirtualNetwork)")] - [string[]]$DestinationAddressPrefix, - [Parameter(Mandatory = $true, HelpMessage = "Destination port or range. One or more of: an integer, a range of integers or a wildcard")] - [string[]]$DestinationPortRange, - [Parameter(Mandatory = $false, HelpMessage = "Print verbose logging messages")] - [switch]$VerboseLogging = $false - ) - try { - if ($VerboseLogging) { Add-LogMessage -Level Info "Ensuring that NSG rule '$Name' exists on '$($NetworkSecurityGroup.Name)'..." 
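For context, the monitoring helpers removed above were meant to be chained: create a private link scope, attach a Log Analytics workspace to it, then point resource diagnostics at that workspace. A sketch with hypothetical names, where $workspace and $vm are assumed to already hold a workspace object and an arbitrary Azure resource:

    # $workspace and $vm are assumed to exist already (hypothetical objects)
    $scope = Deploy-MonitorPrivateLinkScope -Name "example-private-link-scope" -ResourceGroupName "RG_EXAMPLE_MONITORING"
    $null = Connect-PrivateLinkToLogWorkspace -LogAnalyticsWorkspace $workspace -PrivateLinkScope $scope
    # Route diagnostics from an existing resource into the same workspace
    Set-LogAnalyticsDiagnostics -ResourceId $vm.Id -ResourceName $vm.Name -WorkspaceId $workspace.ResourceId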
} - $null = Get-AzNetworkSecurityRuleConfig -Name $Name -NetworkSecurityGroup $NetworkSecurityGroup -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($notExists) { - if ($VerboseLogging) { Add-LogMessage -Level Info "[ ] Creating NSG rule '$Name'" } - try { - $null = Add-AzNetworkSecurityRuleConfig -Name "$Name" ` - -Access "$Access" ` - -Description "$Description" ` - -DestinationAddressPrefix $DestinationAddressPrefix ` - -DestinationPortRange $DestinationPortRange ` - -Direction "$Direction" ` - -NetworkSecurityGroup $NetworkSecurityGroup ` - -Priority $Priority ` - -Protocol "$Protocol" ` - -SourceAddressPrefix $SourceAddressPrefix ` - -SourcePortRange $SourcePortRange ` - -ErrorAction Stop | Set-AzNetworkSecurityGroup -ErrorAction Stop - if ($VerboseLogging) { Add-LogMessage -Level Success "Created NSG rule '$Name'" } - } catch { - Add-LogMessage -Level Fatal "Failed to create NSG rule '$Name'!" -Exception $_.Exception - } - } else { - if ($VerboseLogging) { Add-LogMessage -Level InfoSuccess "Updating NSG rule '$Name'" } - $null = Set-AzNetworkSecurityRuleConfig -Name "$Name" ` - -Access "$Access" ` - -Description "$Description" ` - -DestinationAddressPrefix $DestinationAddressPrefix ` - -DestinationPortRange $DestinationPortRange ` - -Direction "$Direction" ` - -NetworkSecurityGroup $NetworkSecurityGroup ` - -Priority $Priority ` - -Protocol "$Protocol" ` - -SourceAddressPrefix $SourceAddressPrefix ` - -SourcePortRange $SourcePortRange ` - -ErrorAction Stop | Set-AzNetworkSecurityGroup -ErrorAction Stop - } - } catch [Microsoft.Azure.Commands.Network.Common.NetworkCloudException] { - Add-LogMessage -Level Fatal "Azure network connection failed!" -Exception $_.Exception - } -} -Export-ModuleMember -Function Add-NetworkSecurityGroupRule - - -# Associate a VM to an NSG -# ------------------------ -function Add-VmToNSG { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of virtual machine")] - [string]$VMName, - [Parameter(Mandatory = $true, HelpMessage = "Name of network security group")] - [string]$NSGName, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group that the VM belongs to")] - [string]$VmResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group that the NSG belongs to")] - [string]$NsgResourceGroupName, - [Parameter(Mandatory = $false, HelpMessage = "Allow failures, printing a warning message instead of throwing an exception")] - [switch]$WarnOnFailure - ) - $LogLevel = $WarnOnFailure ? "Warning" : "Fatal" - Add-LogMessage -Level Info "[ ] Associating $VMName with $NSGName..." - $matchingVMs = Get-AzVM -Name $VMName -ResourceGroupName $VmResourceGroupName -ErrorAction SilentlyContinue - if ($matchingVMs.Count -ne 1) { Add-LogMessage -Level $LogLevel "Found $($matchingVMs.Count) VM(s) called $VMName!"; return } - $networkCard = Get-AzNetworkInterface | Where-Object { $_.VirtualMachine.Id -eq $matchingVMs[0].Id } - $nsg = Get-AzNetworkSecurityGroup -Name $NSGName -ResourceGroupName $NsgResourceGroupName -ErrorAction SilentlyContinue - if ($nsg.Count -ne 1) { Add-LogMessage -Level $LogLevel "Found $($nsg.Count) NSG(s) called $NSGName!"; return } - $networkCard.NetworkSecurityGroup = $nsg - $null = ($networkCard | Set-AzNetworkInterface) - if ($?) { - Start-Sleep -Seconds 10 # Allow NSG association to propagate - Add-LogMessage -Level Success "NSG association succeeded" - } else { - Add-LogMessage -Level Fatal "NSG association failed!" 
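For context, Add-NetworkSecurityGroupRule (above) takes an NSG object plus the usual rule fields and is safe to re-run, updating an existing rule of the same name. A sketch with hypothetical values, where $nsg is assumed to come from Deploy-NetworkSecurityGroup or Get-AzNetworkSecurityGroup:

    # Hypothetical rule allowing outbound HTTPS from the virtual network
    Add-NetworkSecurityGroupRule -NetworkSecurityGroup $nsg `
                                 -Name "AllowHttpsOutbound" `
                                 -Description "Allow outbound HTTPS to the internet" `
                                 -Priority 1000 `
                                 -Direction "Outbound" `
                                 -Access "Allow" `
                                 -Protocol "Tcp" `
                                 -SourceAddressPrefix "VirtualNetwork" `
                                 -SourcePortRange "*" `
                                 -DestinationAddressPrefix "Internet" `
                                 -DestinationPortRange "443" `
                                 -VerboseLogging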
- } -} -Export-ModuleMember -Function Add-VmToNSG - - -# Create a private endpoint for an automation account -# --------------------------------------------------- -function Deploy-AutomationAccountEndpoint { - param( - [Parameter(Mandatory = $true, HelpMessage = "Automation account to create the private endpoint for")] - [Microsoft.Azure.Commands.Automation.Model.AutomationAccount]$Account, - [Parameter(Mandatory = $true, HelpMessage = "Subnet to deploy into")] - [Microsoft.Azure.Commands.Network.Models.PSSubnet]$Subnet - ) - $endpoint = Deploy-PrivateEndpoint -Name "$($Account.AutomationAccountName)-endpoint".ToLower() ` - -GroupId "DSCAndHybridWorker" ` - -Location $Account.Location ` - -PrivateLinkServiceId (Get-ResourceId $Account.AutomationAccountName) ` - -ResourceGroupName $Account.ResourceGroupName ` - -Subnet $Subnet - return $endpoint -} -Export-ModuleMember -Function Deploy-AutomationAccountEndpoint - - -# Create a firewall if it does not exist -# -------------------------------------- -function Deploy-Firewall { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of public IP address to deploy")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to deploy into")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Name of virtual network containing the 'AzureFirewall' subnet")] - [string]$VirtualNetworkName, - [Parameter(Mandatory = $true, HelpMessage = "Location of resource group to deploy")] - [string]$Location, - [Parameter(Mandatory = $false, HelpMessage = "Force deallocation and reallocation of Firewall")] - [switch]$ForceReallocation - ) - # Ensure Firewall public IP address exists - $publicIp = Deploy-PublicIpAddress -Name "${Name}-PIP" -ResourceGroupName $ResourceGroupName -Location $Location -AllocationMethod Static -Sku "Standard" # NB. Azure Firewall requires a 'Standard' public IP - Add-LogMessage -Level Info "Ensuring that firewall '$Name' exists..." - $vnet = Get-AzVirtualNetwork -Name $VirtualNetworkName -ResourceGroupName $ResourceGroupName - $firewall = Get-AzFirewall -Name $Name -ResourceGroupName $ResourceGroupName -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($notExists) { - Add-LogMessage -Level Info "[ ] Creating firewall '$Name'" - $firewall = New-AzFirewall -Name $Name -ResourceGroupName $ResourceGroupName -Location $Location -VirtualNetwork $vnet -PublicIpAddress @($publicIp) -EnableDnsProxy - if ($?) { - Add-LogMessage -Level Success "Created firewall '$Name'" - } else { - Add-LogMessage -Level Fatal "Failed to create firewall '$Name'!" - } - } - # Ensure Firewall is running - $firewall = Start-Firewall -Name $Name -ResourceGroupName $ResourceGroupName -VirtualNetworkName $VirtualNetworkName - return $firewall -} -Export-ModuleMember -Function Deploy-Firewall - - -# Deploy an application rule to a firewall -# ---------------------------------------- -function Deploy-FirewallApplicationRule { - param( - [Parameter(Mandatory = $false, HelpMessage = "Whether these rules will allow or deny access to the specified resources")] - [ValidateSet("Allow", "Deny")] - [string]$ActionType, - [Parameter(Mandatory = $true, HelpMessage = "Name of application rule collection to add this to")] - [string]$CollectionName, - [Parameter(Mandatory = $true, HelpMessage = "Firewall to add this collection to")] - [Microsoft.Azure.Commands.Network.Models.PSAzureFirewall]$Firewall, - [Parameter(HelpMessage = "Make change to the local firewall object only. 
Useful when making lots of updates in a row. You will need to make a separate call to 'Set-AzFirewall' to apply the changes to the actual Azure firewall.")] - [switch]$LocalChangeOnly, - [Parameter(Mandatory = $true, HelpMessage = "Name of application rule")] - [string]$Name, - [Parameter(Mandatory = $false, HelpMessage = "Priority of this application rule collection")] - [UInt32]$Priority, - [Parameter(Mandatory = $true, ParameterSetName = "ByFqdn", HelpMessage = "Protocol to use")] - [string[]]$Protocol, - [Parameter(Mandatory = $true, HelpMessage = "Address of source")] - [string[]]$SourceAddress, - [Parameter(Mandatory = $true, ParameterSetName = "ByFqdn", HelpMessage = "List of FQDNs to apply rule to. Supports '*' wildcard at start of each FQDN.")] - [string[]]$TargetFqdn, - [Parameter(Mandatory = $true, ParameterSetName = "ByTag", HelpMessage = "List of FQDN tags to apply rule to. An FQN tag represents a set of Azure-curated FQDNs.")] - [string[]]$TargetTag - ) - Add-LogMessage -Level Info "[ ] Ensuring that application rule '$Name' exists..." - $params = @{} - if ($TargetTag) { $params["FqdnTag"] = $TargetTag } - if ($TargetFqdn) { $params["TargetFqdn"] = $TargetFqdn } - $rule = New-AzFirewallApplicationRule -Name $Name -SourceAddress $SourceAddress -Protocol $Protocol @params - try { - $ruleCollection = $Firewall.GetApplicationRuleCollectionByName($CollectionName) - # Overwrite any existing rule with the same name to ensure that we can update if settings have changed - $existingRule = $ruleCollection.Rules | Where-Object { $_.Name -eq $Name } - if ($existingRule) { $ruleCollection.RemoveRuleByName($Name) } - $ruleCollection.AddRule($rule) - # Remove the existing rule collection to ensure that we can update with the new rule - $Firewall.RemoveApplicationRuleCollectionByName($ruleCollection.Name) - } catch [System.Management.Automation.MethodInvocationException] { - $ruleCollection = New-AzFirewallApplicationRuleCollection -Name $CollectionName -Priority $Priority -ActionType $ActionType -Rule $rule - if (-not $?) { - Add-LogMessage -Level Fatal "Failed to create application rule collection '$CollectionName'!" - } - } - try { - $null = $Firewall.ApplicationRuleCollections.Add($ruleCollection) - if ($LocalChangeOnly) { - Add-LogMessage -Level InfoSuccess "Added application rule '$Name' to set of rules to update on remote firewall." - } else { - $Firewall = Set-AzFirewall -AzureFirewall $Firewall -ErrorAction Stop - Add-LogMessage -Level Success "Ensured that application rule '$Name' exists and updated remote firewall." - } - } catch [System.Management.Automation.MethodInvocationException], [Microsoft.Rest.Azure.CloudException] { - Add-LogMessage -Level Fatal "Failed to ensure that application rule '$Name' exists!" 
- } - return $Firewall -} -Export-ModuleMember -Function Deploy-FirewallApplicationRule - - -# Deploy a network rule collection to a firewall -# ---------------------------------------------- -function Deploy-FirewallNetworkRule { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to deploy into")] - [ValidateSet("Allow", "Deny")] - [string]$ActionType, - [Parameter(Mandatory = $true, HelpMessage = "Name of network rule collection to add this to")] - [string]$CollectionName, - [Parameter(Mandatory = $true, ParameterSetName = "ByAddress", HelpMessage = "Address(es) of destination")] - [string[]]$DestinationAddress, - [Parameter(Mandatory = $true, ParameterSetName = "ByFQDN", HelpMessage = "FQDN(s) of destination")] - [string[]]$DestinationFqdn, - [Parameter(Mandatory = $true, HelpMessage = "Port(s) of destination")] - [string[]]$DestinationPort, - [Parameter(Mandatory = $true, HelpMessage = "Firewall to add this collection to")] - [Microsoft.Azure.Commands.Network.Models.PSAzureFirewall]$Firewall, - [Parameter(HelpMessage = "Make change to the local firewall object only. Useful when making lots of updates in a row. You will need to make a separate call to 'Set-AzFirewall' to apply the changes to the actual Azure firewall.")] - [switch]$LocalChangeOnly, - [Parameter(Mandatory = $true, HelpMessage = "Name of network rule")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to deploy into")] - [UInt32]$Priority, - [Parameter(Mandatory = $true, HelpMessage = "Protocol to use")] - [string[]]$Protocol, - [Parameter(Mandatory = $true, HelpMessage = "Address(es) of source")] - [string[]]$SourceAddress - ) - Add-LogMessage -Level Info "[ ] Ensuring that traffic from '$SourceAddress' to '$($DestinationAddress ? $DestinationAddress : $DestinationFqdn)' on ports '$DestinationPort' over $Protocol is set on $($Firewall.Name)..." - $params = @{} - if ($DestinationAddress) { $params["DestinationAddress"] = $DestinationAddress } - if ($DestinationFqdn) { $params["DestinationFqdn"] = $DestinationFqdn } - $rule = New-AzFirewallNetworkRule -Name $Name -SourceAddress $SourceAddress -DestinationPort $DestinationPort -Protocol $Protocol @params - try { - $ruleCollection = $Firewall.GetNetworkRuleCollectionByName($CollectionName) - Add-LogMessage -Level InfoSuccess "Network rule collection '$CollectionName' already exists" - # Overwrite any existing rule with the same name to ensure that we can update if settings have changed - $existingRule = $ruleCollection.Rules | Where-Object { $_.Name -eq $Name } - if ($existingRule) { $ruleCollection.RemoveRuleByName($Name) } - $ruleCollection.AddRule($rule) - # Remove the existing rule collection to ensure that we can update with the new rule - $Firewall.RemoveNetworkRuleCollectionByName($ruleCollection.Name) - } catch [System.Management.Automation.MethodInvocationException] { - $ruleCollection = New-AzFirewallNetworkRuleCollection -Name $CollectionName -Priority $Priority -ActionType $ActionType -Rule $rule - if (-not $?) { - Add-LogMessage -Level Fatal "Failed to create network rule collection '$CollectionName'!" - } - } - try { - $null = $Firewall.NetworkRuleCollections.Add($ruleCollection) - if ($LocalChangeOnly) { - Add-LogMessage -Level InfoSuccess "Added network rule '$Name' to set of rules to update on remote firewall." 
- } else { - $Firewall = Set-AzFirewall -AzureFirewall $Firewall -ErrorAction Stop - Add-LogMessage -Level Success "Ensured that network rule '$Name' exists and updated remote firewall." - } - } catch [System.Management.Automation.MethodInvocationException], [Microsoft.Rest.Azure.CloudException] { - Add-LogMessage -Level Fatal "Failed to ensure that network rule '$Name' exists!" - } - return $Firewall -} -Export-ModuleMember -Function Deploy-FirewallNetworkRule - - -# Create a virtual machine NIC -# ---------------------------- -function Deploy-NetworkInterface { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of VM NIC to deploy")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to deploy into")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Subnet to attach this NIC to")] - [Microsoft.Azure.Commands.Network.Models.PSSubnet]$Subnet, - [Parameter(Mandatory = $true, HelpMessage = "Location of resource group to deploy")] - [string]$Location, - [Parameter(Mandatory = $false, HelpMessage = "Public IP address for this NIC")] - [ValidateSet("Dynamic", "Static")] - [string]$PublicIpAddressAllocation = $null, - [Parameter(Mandatory = $false, HelpMessage = "Private IP address for this NIC")] - [string]$PrivateIpAddress = $null - ) - Add-LogMessage -Level Info "Ensuring that VM network card '$Name' exists..." - $vmNic = Get-AzNetworkInterface -Name $Name -ResourceGroupName $ResourceGroupName -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($notExists) { - Add-LogMessage -Level Info "[ ] Creating VM network card '$Name'" - $ipAddressParams = @{} - if ($PublicIpAddressAllocation) { - $PublicIpAddress = Deploy-PublicIpAddress -Name "$Name-PIP" -ResourceGroupName $ResourceGroupName -AllocationMethod $PublicIpAddressAllocation -Location $Location - $ipAddressParams["PublicIpAddress"] = $PublicIpAddress - } - if ($PrivateIpAddress) { $ipAddressParams["PrivateIpAddress"] = $PrivateIpAddress } - $vmNic = New-AzNetworkInterface -Name $Name -ResourceGroupName $ResourceGroupName -Subnet $Subnet -IpConfigurationName "ipconfig-$Name" -Location $Location @ipAddressParams -Force - if ($?) { - Add-LogMessage -Level Success "Created VM network card '$Name'" - } else { - Add-LogMessage -Level Fatal "Failed to create VM network card '$Name'!" 
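For context, the firewall rule helpers above support a -LocalChangeOnly mode so that many rules can be staged locally and then pushed in a single Set-AzFirewall call, as their parameter help notes. A sketch with hypothetical addresses and FQDNs, where $firewall is assumed to come from Deploy-Firewall:

    # Stage a hypothetical application rule locally without updating Azure yet
    $firewall = Deploy-FirewallApplicationRule -Firewall $firewall `
                                               -CollectionName "ExampleAllowedSites" `
                                               -ActionType "Allow" `
                                               -Priority 100 `
                                               -Name "AllowPackageMirror" `
                                               -SourceAddress "10.0.1.0/24" `
                                               -Protocol "https:443" `
                                               -TargetFqdn "mirror.example.org" `
                                               -LocalChangeOnly
    # Push all staged rule changes to Azure in one update
    $firewall = Set-AzFirewall -AzureFirewall $firewall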
- } - } else { - Add-LogMessage -Level InfoSuccess "VM network card '$Name' already exists" - } - return $vmNic -} -Export-ModuleMember -Function Deploy-NetworkInterface - - -# Create a private endpoint for an automation account -# --------------------------------------------------- -function Deploy-MonitorPrivateLinkScopeEndpoint { - param( - [Parameter(Mandatory = $true, HelpMessage = "Location to deploy the endpoint")] - [string]$Location, - [Parameter(Mandatory = $true, HelpMessage = "Private link scope to connect")] - [Microsoft.Azure.Commands.Insights.OutputClasses.PSMonitorPrivateLinkScope]$PrivateLinkScope, - [Parameter(Mandatory = $true, HelpMessage = "Subnet to deploy into")] - [Microsoft.Azure.Commands.Network.Models.PSSubnet]$Subnet - ) - $endpoint = Deploy-PrivateEndpoint -Name "$($PrivateLinkScope.Name)-endpoint".ToLower() ` - -GroupId "azuremonitor" ` - -Location $Location ` - -PrivateLinkServiceId $PrivateLinkScope.Id ` - -ResourceGroupName (Get-ResourceGroupName $PrivateLinkScope.Name) ` - -Subnet $Subnet - return $endpoint -} -Export-ModuleMember -Function Deploy-MonitorPrivateLinkScopeEndpoint - - -# Create network security group if it does not exist -# -------------------------------------------------- -function Deploy-NetworkSecurityGroup { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of network security group to deploy")] - $Name, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to deploy into")] - $ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Location of resource group to deploy")] - $Location - ) - Add-LogMessage -Level Info "Ensuring that network security group '$Name' exists..." - $nsg = Get-AzNetworkSecurityGroup -Name $Name -ResourceGroupName $ResourceGroupName -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($notExists) { - Add-LogMessage -Level Info "[ ] Creating network security group '$Name'" - $nsg = New-AzNetworkSecurityGroup -Name $Name -Location $Location -ResourceGroupName $ResourceGroupName -Force - if ($?) { - Add-LogMessage -Level Success "Created network security group '$Name'" - } else { - Add-LogMessage -Level Fatal "Failed to create network security group '$Name'!" - } - } else { - Add-LogMessage -Level InfoSuccess "Network security group '$Name' already exists" - } - return $nsg -} -Export-ModuleMember -Function Deploy-NetworkSecurityGroup - - -# Create a public IP address if it does not exist -# ----------------------------------------------- -function Deploy-PublicIpAddress { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of public IP address to deploy")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to deploy into")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Allocation method (static or dynamic)")] - [ValidateSet("Dynamic", "Static")] - [string]$AllocationMethod, - [Parameter(Mandatory = $true, HelpMessage = "Location of resource group to deploy")] - [string]$Location, - [Parameter(Mandatory = $false, HelpMessage = "SKU ('Basic' or 'Standard')")] - [ValidateSet("Basic", "Standard")] - [string]$Sku = "Basic" - ) - Add-LogMessage -Level Info "Ensuring that public IP address '$Name' exists..." 
- $publicIpAddress = Get-AzPublicIpAddress -Name $Name -ResourceGroupName $ResourceGroupName -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($notExists) { - Add-LogMessage -Level Info "[ ] Creating public IP address '$Name'" - $ipAddressParams = @{} - $publicIpAddress = New-AzPublicIpAddress -Name $Name -ResourceGroupName $ResourceGroupName -AllocationMethod $AllocationMethod -Location $Location -Sku $Sku @ipAddressParams - if ($?) { - Add-LogMessage -Level Success "Created public IP address '$Name'" - } else { - Add-LogMessage -Level Fatal "Failed to create public IP address '$Name'!" - } - } else { - Add-LogMessage -Level InfoSuccess "Public IP address '$Name' already exists" - } - return $publicIpAddress -} -Export-ModuleMember -Function Deploy-PublicIpAddress - - -# Create a private endpoint -# ------------------------- -function Deploy-PrivateEndpoint { - param( - [Parameter(Mandatory = $true, HelpMessage = "Group ID for this endpoint")] - [string]$GroupId, - [Parameter(Mandatory = $true, HelpMessage = "Location to deploy the endpoint")] - [string]$Location, - [Parameter(Mandatory = $true, HelpMessage = "Name of the endpoint")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "ID of the service to link against")] - [string]$PrivateLinkServiceId, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to deploy into")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Subnet to deploy into")] - [Microsoft.Azure.Commands.Network.Models.PSSubnet]$Subnet - ) - Add-LogMessage -Level Info "Ensuring that private endpoint '$Name' exists..." - $endpoint = Get-AzPrivateEndpoint -Name $Name -ResourceGroupName $ResourceGroupName -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($notExists) { - Add-LogMessage -Level Info "[ ] Creating private endpoint '$Name'" - $privateLinkServiceConnection = New-AzPrivateLinkServiceConnection -Name "${Name}LinkServiceConnection" -PrivateLinkServiceId $PrivateLinkServiceId -GroupId $GroupId - $endpoint = New-AzPrivateEndpoint -Name $Name -ResourceGroupName $ResourceGroupName -Location $Location -Subnet $Subnet -PrivateLinkServiceConnection $privateLinkServiceConnection - if ($?) { - Add-LogMessage -Level Success "Created private endpoint '$Name'" - } else { - Add-LogMessage -Level Fatal "Failed to create private endpoint '$Name'!" - } - } else { - Add-LogMessage -Level InfoSuccess "Private endpoint '$Name' already exists" - } - return $endpoint -} -Export-ModuleMember -Function Deploy-PrivateEndpoint - - -# Create a route if it does not exist -# ----------------------------------- -function Deploy-Route { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of route to deploy")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Name of route table that this route should be deployed into")] - [string]$RouteTableName, - [Parameter(Mandatory = $true, HelpMessage = "CIDR that this route applies to")] - [string]$AppliesTo, - [Parameter(Mandatory = $true, HelpMessage = "The firewall IP address or one of 'Internet', 'None', 'VirtualNetworkGateway', 'VnetLocal'")] - [string]$NextHop - ) - $routeTable = Get-AzRouteTable -Name $RouteTableName - if (-not $routeTable) { - Add-LogMessage -Level Fatal "No route table named '$routeTableName' was found in this subscription!" - } - Add-LogMessage -Level Info "[ ] Ensuring that route '$Name' exists..." 
- $routeConfig = Get-AzRouteConfig -Name $Name -RouteTable $routeTable -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($notExists) { - Add-LogMessage -Level Info "[ ] Creating route '$Name'" - if (@('Internet', 'None', 'VirtualNetworkGateway', 'VnetLocal').Contains($NextHop)) { - $null = Add-AzRouteConfig -Name $Name -RouteTable $routeTable -AddressPrefix $AppliesTo -NextHopType $NextHop | Set-AzRouteTable - } else { - $null = Add-AzRouteConfig -Name $Name -RouteTable $routeTable -AddressPrefix $AppliesTo -NextHopType "VirtualAppliance" -NextHopIpAddress $NextHop | Set-AzRouteTable - } - $routeConfig = Get-AzRouteConfig -Name $Name -RouteTable $routeTable - if ($?) { - Add-LogMessage -Level Success "Created route '$Name'" - } else { - Add-LogMessage -Level Fatal "Failed to create route '$Name'!" - } - } else { - Add-LogMessage -Level InfoSuccess "Route '$Name' already exists" - } - return $routeConfig -} -Export-ModuleMember -Function Deploy-Route - - -# Create a route table if it does not exist -# ----------------------------------------- -function Deploy-RouteTable { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of public IP address to deploy")] - $Name, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to deploy into")] - $ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Location of resource group to deploy")] - $Location - ) - Add-LogMessage -Level Info "[ ] Ensuring that route table '$Name' exists..." - $routeTable = Get-AzRouteTable -Name $Name -ResourceGroupName $ResourceGroupName -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($notExists) { - Add-LogMessage -Level Info "[ ] Creating route table '$Name'" - $routeTable = New-AzRouteTable -Name $Name -ResourceGroupName $ResourceGroupName -Location $Location -DisableBgpRoutePropagation - if ($?) { - Add-LogMessage -Level Success "Created route table '$Name'" - } else { - Add-LogMessage -Level Fatal "Failed to create route table '$Name'!" - } - } else { - Add-LogMessage -Level InfoSuccess "Route table '$Name' already exists" - } - return $routeTable -} -Export-ModuleMember -Function Deploy-RouteTable - - -# Create subnet if it does not exist -# ---------------------------------- -function Deploy-Subnet { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of subnet to deploy")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "A VirtualNetwork object to deploy into")] - $VirtualNetwork, - [Parameter(Mandatory = $true, HelpMessage = "Specifies a range of IP addresses for a virtual network")] - [string]$AddressPrefix - ) - Add-LogMessage -Level Info "Ensuring that subnet '$Name' exists..." - $null = Get-AzVirtualNetworkSubnetConfig -Name $Name -VirtualNetwork $VirtualNetwork -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($notExists) { - Add-LogMessage -Level Info "[ ] Creating subnet '$Name'" - $null = Add-AzVirtualNetworkSubnetConfig -Name $Name -VirtualNetwork $VirtualNetwork -AddressPrefix $AddressPrefix - $VirtualNetwork = Set-AzVirtualNetwork -VirtualNetwork $VirtualNetwork - if ($?) { - Add-LogMessage -Level Success "Created subnet '$Name'" - } else { - Add-LogMessage -Level Fatal "Failed to create subnet '$Name'!" 
- } - } else { - Add-LogMessage -Level InfoSuccess "Subnet '$Name' already exists" - } - return Get-Subnet -Name $Name -VirtualNetworkName $VirtualNetwork.Name -ResourceGroupName $VirtualNetwork.ResourceGroupName -} -Export-ModuleMember -Function Deploy-Subnet - - -# Create virtual network if it does not exist -# ------------------------------------------ -function Deploy-VirtualNetwork { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of virtual network to deploy")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to deploy into")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Specifies a range of IP addresses for a virtual network")] - [string]$AddressPrefix, - [Parameter(Mandatory = $true, HelpMessage = "Location of resource group to deploy")] - [string]$Location, - [Parameter(Mandatory = $false, HelpMessage = "DNS servers to attach to this virtual network")] - [string[]]$DnsServer - ) - Add-LogMessage -Level Info "Ensuring that virtual network '$Name' exists..." - $vnet = Get-AzVirtualNetwork -Name $Name -ResourceGroupName $ResourceGroupName -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($notExists) { - Add-LogMessage -Level Info "[ ] Creating virtual network '$Name'" - $params = @{} - if ($DnsServer) { $params["DnsServer"] = $DnsServer } - $vnet = New-AzVirtualNetwork -Name $Name -Location $Location -ResourceGroupName $ResourceGroupName -AddressPrefix "$AddressPrefix" @params -Force - if ($?) { - Add-LogMessage -Level Success "Created virtual network '$Name'" - } else { - Add-LogMessage -Level Fatal "Failed to create virtual network '$Name'!" - } - } else { - Add-LogMessage -Level InfoSuccess "Virtual network '$Name' already exists" - } - return $vnet -} -Export-ModuleMember -Function Deploy-VirtualNetwork - - -# Create virtual network gateway if it does not exist -# --------------------------------------------------- -function Deploy-VirtualNetworkGateway { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of virtual network gateway to deploy")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to deploy into")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Location of resource group to deploy")] - [string]$Location, - [Parameter(Mandatory = $true, HelpMessage = "ID of the public IP address to use")] - [string]$PublicIpAddressId, - [Parameter(Mandatory = $true, HelpMessage = "ID of the subnet to deploy into")] - [string]$SubnetId, - [Parameter(Mandatory = $true, HelpMessage = "Point-to-site certificate used by the gateway")] - [string]$P2SCertificate, - [Parameter(Mandatory = $true, HelpMessage = "Range of IP addresses used by the point-to-site VpnClient")] - [string]$VpnClientAddressPool - ) - Add-LogMessage -Level Info "Ensuring that virtual network gateway '$Name' exists..." - $gateway = Get-AzVirtualNetworkGateway -Name $Name -ResourceGroupName $ResourceGroupName -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($notExists) { - Add-LogMessage -Level Info "[ ] Creating virtual network gateway '$Name'..." 
- $ipconfig = New-AzVirtualNetworkGatewayIpConfig -Name "shmgwipconf" -SubnetId $SubnetId -PublicIpAddressId $PublicIpAddressId - $rootCertificate = New-AzVpnClientRootCertificate -Name "SafeHavenManagementP2SRootCert" -PublicCertData $P2SCertificate - $gateway = New-AzVirtualNetworkGateway -Name $Name ` - -GatewaySku VpnGw1 ` - -GatewayType Vpn ` - -IpConfigurations $ipconfig ` - -Location $Location ` - -ResourceGroupName $ResourceGroupName ` - -VpnClientAddressPool $VpnClientAddressPool ` - -VpnClientProtocol IkeV2, SSTP ` - -VpnClientRootCertificates $rootCertificate ` - -VpnType RouteBased - if ($?) { - Add-LogMessage -Level Success "Created virtual network gateway '$Name'" - } else { - Add-LogMessage -Level Fatal "Failed to create virtual network gateway '$Name'!" - } - } else { - Add-LogMessage -Level InfoSuccess "Virtual network gateway '$Name' already exists" - } - return $gateway -} -Export-ModuleMember -Function Deploy-VirtualNetworkGateway - - -# Get next available IP address in range -# -------------------------------------- -function Get-NextAvailableIpInRange { - param( - [Parameter(Mandatory = $true, HelpMessage = "Input range in CIDR notation")] - [string]$IpRangeCidr, - [Parameter(Mandatory = $false, HelpMessage = "Offset to apply before returning an IP address")] - [int]$Offset, - [Parameter(Mandatory = $false, HelpMessage = "Virtual network to check availability against")] - [Microsoft.Azure.Commands.Network.Models.PSVirtualNetwork]$VirtualNetwork - ) - # Get the start and end IP decimals for this CIDR range - $ipStart, $ipEnd = Convert-CidrToIpAddressRange -IpRangeCidr $IpRangeCidr -AsDecimal - - # Return the full range or filter as required - $ipAddresses = $ipStart..$ipEnd | ForEach-Object { Convert-DecimalToIpAddress -IpDecimal $_ } | Select-Object -Skip $Offset - if ($VirtualNetwork) { - $ipAddress = $ipAddresses | Where-Object { (Test-AzPrivateIPAddressAvailability -VirtualNetwork $VirtualNetwork -IPAddress $_).Available } | Select-Object -First 1 - } else { - $ipAddress = $ipAddresses | Select-Object -First 1 - } - if (-not $ipAddress) { - Add-LogMessage -Level Fatal "There are no free IP addresses in '$IpRangeCidr' after applying the offset '$Offset'!" 
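For context, the network helpers above could be composed to build up a network piece by piece, each call being idempotent. A sketch with hypothetical names, CIDR ranges and location:

    # Hypothetical virtual network, subnet and routing setup
    $vnet = Deploy-VirtualNetwork -Name "vnet-example" -ResourceGroupName "RG_EXAMPLE_NETWORKING" -Location "uksouth" -AddressPrefix "10.0.0.0/21"
    $subnet = Deploy-Subnet -Name "ComputeSubnet" -VirtualNetwork $vnet -AddressPrefix "10.0.1.0/24"
    $null = Deploy-RouteTable -Name "rt-example" -ResourceGroupName "RG_EXAMPLE_NETWORKING" -Location "uksouth"
    # Send all outbound traffic via a hypothetical firewall private IP
    $null = Deploy-Route -Name "ViaFirewall" -RouteTableName "rt-example" -AppliesTo "0.0.0.0/0" -NextHop "10.0.0.4"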
- } - return $ipAddress -} -Export-ModuleMember -Function Get-NextAvailableIpInRange - - -# Get subnet -# ---------- -function Get-Subnet { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of subnet to retrieve")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Name of virtual network that this subnet belongs to")] - [string]$VirtualNetworkName, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group that this subnet belongs to")] - [string]$ResourceGroupName - ) - $virtualNetwork = Get-AzVirtualNetwork -Name $VirtualNetworkName -ResourceGroupName $ResourceGroupName - return ($virtualNetwork.Subnets | Where-Object { $_.Name -eq $Name })[0] -} -Export-ModuleMember -Function Get-Subnet - - -# Get the virtual network that a given subnet belongs to -# ------------------------------------------------------ -function Get-VirtualNetwork { - param( - [Parameter(Mandatory = $false, HelpMessage = "Name of virtual network to retrieve")] - [string]$Name, - [Parameter(Mandatory = $false, HelpMessage = "Name of resource group that this virtual network belongs to")] - [string]$ResourceGroupName - ) - $params = @{} - if ($Name) { $params["Name"] = $Name } - if ($ResourceGroupName) { $params["ResourceGroupName"] = $ResourceGroupName } - return Get-AzVirtualNetwork @params -} -Export-ModuleMember -Function Get-VirtualNetwork - - -# Get the virtual network that a given subnet belongs to -# ------------------------------------------------------ -function Get-VirtualNetworkFromSubnet { - param( - [Parameter(Mandatory = $true, HelpMessage = "Subnet that we want the virtual network for")] - [Microsoft.Azure.Commands.Network.Models.PSSubnet]$Subnet - ) - $originalContext = Get-AzContext - $null = Set-AzContext -SubscriptionId $Subnet.Id.Split("/")[2] -ErrorAction Stop - $virtualNetwork = Get-AzVirtualNetwork | Where-Object { (($_.Subnets | Where-Object { $_.Id -eq $Subnet.Id }).Count -gt 0) } - $null = Set-AzContext -Context $originalContext -ErrorAction Stop - return $virtualNetwork -} -Export-ModuleMember -Function Get-VirtualNetworkFromSubnet - - -# Remove a virtual machine NIC -# ---------------------------- -function Remove-NetworkInterface { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of VM NIC to remove")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to remove from")] - [string]$ResourceGroupName - ) - $null = Get-AzNetworkInterface -Name $Name -ResourceGroupName $ResourceGroupName -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($notExists) { - Add-LogMessage -Level InfoSuccess "VM network card '$Name' does not exist" - } else { - Add-LogMessage -Level Info "[ ] Removing VM network card '$Name'" - $null = Remove-AzNetworkInterface -Name $Name -ResourceGroupName $ResourceGroupName -Force - if ($?) 
{ - Add-LogMessage -Level Success "Removed VM network card '$Name'" - } else { - Add-LogMessage -Level Fatal "Failed to remove VM network card '$Name'" - } - } -} -Export-ModuleMember -Function Remove-NetworkInterface - - -# Set Network Security Group Rules -# -------------------------------- -function Set-NetworkSecurityGroupRules { - param( - [parameter(Mandatory = $true, HelpMessage = "Network Security Group to set rules for")] - [Microsoft.Azure.Commands.Network.Models.PSNetworkSecurityGroup]$NetworkSecurityGroup, - [parameter(Mandatory = $true, HelpMessage = "Rules to set for Network Security Group")] - [Object[]]$Rules - ) - Add-LogMessage -Level Info "[ ] Setting $($Rules.Count) rules for Network Security Group '$($NetworkSecurityGroup.Name)'" - try { - $existingRules = @(Get-AzNetworkSecurityRuleConfig -NetworkSecurityGroup $NetworkSecurityGroup) - foreach ($existingRule in $existingRules) { - $NetworkSecurityGroup = Remove-AzNetworkSecurityRuleConfig -Name $existingRule.Name -NetworkSecurityGroup $NetworkSecurityGroup - } - } catch { - Add-LogMessage -Level Fatal "Error removing existing rules from Network Security Group '$($NetworkSecurityGroup.Name)'." -Exception $_.Exception - } - try { - foreach ($rule in $Rules) { - $null = Add-NetworkSecurityGroupRule -NetworkSecurityGroup $NetworkSecurityGroup @rule - } - } catch { - Add-LogMessage -Level Fatal "Error adding provided rules to Network Security Group '$($NetworkSecurityGroup.Name)'." -Exception $_.Exception - } - try { - $NetworkSecurityGroup = Get-AzNetworkSecurityGroup -Name $NetworkSecurityGroup.Name -ResourceGroupName $NetworkSecurityGroup.ResourceGroupName -ErrorAction Stop - $updatedRules = @(Get-AzNetworkSecurityRuleConfig -NetworkSecurityGroup $NetworkSecurityGroup) - foreach ($updatedRule in $updatedRules) { - $sourceAddressText = ($updatedRule.SourceAddressPrefix -eq "*") ? "any source" : $updatedRule.SourceAddressPrefix - $destinationAddressText = ($updatedRule.DestinationAddressPrefix -eq "*") ? "any destination" : $updatedRule.DestinationAddressPrefix - $destinationPortText = ($updatedRule.DestinationPortRange -eq "*") ? "any port" : "ports $($updatedRule.DestinationPortRange)" - Add-LogMessage -Level Success "Set $($updatedRule.Name) rule to $($updatedRule.Access) connections from $sourceAddressText to $destinationPortText on $destinationAddressText." - } - } catch { - Add-LogMessage -Level Fatal "Failed to add one or more NSG rules!" -Exception $_.Exception - } - return $NetworkSecurityGroup -} -Export-ModuleMember -Function Set-NetworkSecurityGroupRules - - -# Attach a network security group to a subnet -# ------------------------------------------- -function Set-SubnetNetworkSecurityGroup { - param( - [Parameter(Mandatory = $true, HelpMessage = "Subnet whose NSG will be set")] - [Microsoft.Azure.Commands.Network.Models.PSSubnet]$Subnet, - [Parameter(Mandatory = $true, HelpMessage = "Network security group to attach")] - $NetworkSecurityGroup, - [Parameter(Mandatory = $false, HelpMessage = "Virtual network that the subnet belongs to")] - $VirtualNetwork - ) - if (-not $VirtualNetwork) { - $VirtualNetwork = Get-VirtualNetworkFromSubnet -Subnet $Subnet - } - Add-LogMessage -Level Info "Ensuring that NSG '$($NetworkSecurityGroup.Name)' is attached to subnet '$($Subnet.Name)'..." - $null = Set-AzVirtualNetworkSubnetConfig -Name $Subnet.Name -VirtualNetwork $VirtualNetwork -AddressPrefix $Subnet.AddressPrefix -NetworkSecurityGroup $NetworkSecurityGroup - $success = $? 
- $VirtualNetwork = Set-AzVirtualNetwork -VirtualNetwork $VirtualNetwork - $success = $success -and $? - $updatedSubnet = Get-Subnet -Name $Subnet.Name -VirtualNetworkName $VirtualNetwork.Name -ResourceGroupName $VirtualNetwork.ResourceGroupName - $success = $success -and $? - if ($success) { - Add-LogMessage -Level Success "Set network security group on '$($Subnet.Name)'" - } else { - Add-LogMessage -Level Fatal "Failed to set network security group on '$($Subnet.Name)'!" - } - return $updatedSubnet -} -Export-ModuleMember -Function Set-SubnetNetworkSecurityGroup - - -# Peer two vnets -# -------------- -function Set-VnetPeering { - param( - [Parameter(Mandatory = $false, HelpMessage = "Enable use of remote gateway from the first VNet")] - [switch]$VNet1AllowRemoteGateway, - [Parameter(Mandatory = $true, HelpMessage = "Name of the first of two VNets to peer")] - [string]$Vnet1Name, - [Parameter(Mandatory = $true, HelpMessage = "Resource group name of the first VNet")] - [string]$Vnet1ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Subscription name of the first VNet")] - [string]$Vnet1SubscriptionName, - [Parameter(Mandatory = $false, HelpMessage = "Enable use of remote gateway from the second VNet")] - [switch]$VNet2AllowRemoteGateway, - [Parameter(Mandatory = $true, HelpMessage = "Name of the second of two VNets to peer")] - [string]$Vnet2Name, - [Parameter(Mandatory = $true, HelpMessage = "Resource group name of the second VNet")] - [string]$Vnet2ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Subscription name of the second VNet")] - [string]$Vnet2SubscriptionName - ) - # Exit early if trying to enable remote gateways on both virtual networks - if ($VNet1AllowRemoteGateway -and $VNet2AllowRemoteGateway) { - Add-LogMessage -Level Fatal "Remote gateways cannot be used from both VNets in a peering!" - } - try { - # Get original subscription - $originalContext = Get-AzContext - Add-LogMessage -Level Info "Peering virtual networks ${Vnet1Name} and ${Vnet2Name}." 
- - # Get virtual networks - $null = Set-AzContext -SubscriptionId $Vnet1SubscriptionName -ErrorAction Stop - $Vnet1 = Get-AzVirtualNetwork -Name $Vnet1Name -ResourceGroupName $Vnet1ResourceGroupName -ErrorAction Stop - $null = Set-AzContext -SubscriptionId $Vnet2SubscriptionName -ErrorAction Stop - $Vnet2 = Get-AzVirtualNetwork -Name $Vnet2Name -ResourceGroupName $Vnet2ResourceGroupName -ErrorAction Stop - - # Remove any existing peerings - $null = Set-AzContext -SubscriptionId $Vnet1SubscriptionName -ErrorAction Stop - $null = Get-AzVirtualNetworkPeering -VirtualNetworkName $Vnet1.Name -ResourceGroupName $Vnet1.ResourceGroupName | Where-Object { $_.RemoteVirtualNetwork.Id -eq $Vnet2.Id } | Remove-AzVirtualNetworkPeering -Force -ErrorAction Stop - $null = Set-AzContext -SubscriptionId $Vnet2SubscriptionName -ErrorAction Stop - $null = Get-AzVirtualNetworkPeering -VirtualNetworkName $Vnet2.Name -ResourceGroupName $Vnet2.ResourceGroupName | Where-Object { $_.RemoteVirtualNetwork.Id -eq $Vnet1.Id } | Remove-AzVirtualNetworkPeering -Force -ErrorAction Stop - - # Set remote gateway parameters if requested - $paramsVnet1 = @{} - $paramsVnet2 = @{} - if ($VNet1AllowRemoteGateway.IsPresent) { - $paramsVnet1["AllowGatewayTransit"] = $true - $paramsVnet2["UseRemoteGateways"] = $true - } - if ($VNet2AllowRemoteGateway.IsPresent) { - $paramsVnet1["UseRemoteGateways"] = $true - $paramsVnet2["AllowGatewayTransit"] = $true - } - - # Create peering in the direction VNet1 -> VNet2 - $null = Set-AzContext -SubscriptionId $Vnet1SubscriptionName -ErrorAction Stop - $PeeringName = "PEER_${Vnet2Name}" - Add-LogMessage -Level Info "[ ] Adding peering '$PeeringName' to virtual network ${Vnet1Name}." - $null = Add-AzVirtualNetworkPeering -Name "$PeeringName" -VirtualNetwork $vnet1 -RemoteVirtualNetworkId $Vnet2.Id @paramsVnet1 -ErrorAction Stop - if ($?) { - Add-LogMessage -Level Success "Adding peering '$PeeringName' succeeded" - } else { - Add-LogMessage -Level Fatal "Adding peering '$PeeringName' failed!" - } - # Create peering in the direction VNet2 -> VNet1 - $null = Set-AzContext -SubscriptionId $Vnet2SubscriptionName -ErrorAction Stop - $PeeringName = "PEER_${Vnet1Name}" - Add-LogMessage -Level Info "[ ] Adding peering '$PeeringName' to virtual network ${Vnet2Name}." - $null = Add-AzVirtualNetworkPeering -Name "$PeeringName" -VirtualNetwork $Vnet2 -RemoteVirtualNetworkId $Vnet1.Id @paramsVnet2 -ErrorAction Stop - if ($?) { - Add-LogMessage -Level Success "Adding peering '$PeeringName' succeeded" - } else { - Add-LogMessage -Level Fatal "Adding peering '$PeeringName' failed!" - } - } finally { - # Switch back to original subscription - $null = Set-AzContext -Context $originalContext -ErrorAction Stop - } -} -Export-ModuleMember -Function Set-VnetPeering - - -# Ensure Firewall is running, with option to force a restart -# ---------------------------------------------------------- -function Start-Firewall { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of Firewall")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Name of Firewall resource group")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Name of virtual network containing the 'AzureFirewall' subnet")] - [string]$VirtualNetworkName, - [Parameter(Mandatory = $false, HelpMessage = "Force restart of Firewall")] - [switch]$ForceRestart - ) - Add-LogMessage -Level Info "Ensuring that firewall '$Name' is running..." 
-    $firewall = Get-AzFirewall -Name $Name -ResourceGroupName $ResourceGroupName -ErrorVariable notExists -ErrorAction SilentlyContinue
-    if (-not $firewall) {
-        Add-LogMessage -Level Error "Firewall '$Name' does not exist in $ResourceGroupName"
-    } else {
-        $virtualNetwork = Get-AzVirtualNetwork -Name $VirtualNetworkName
-        $publicIP = Get-AzPublicIpAddress -Name "${Name}-PIP" -ResourceGroupName $ResourceGroupName
-        if ($ForceRestart) {
-            Add-LogMessage -Level Info "Restart requested. Deallocating firewall '$Name'..."
-            $firewall = Stop-Firewall -Name $Name -ResourceGroupName $ResourceGroupName
-        }
-        # At this point we either have a running firewall or a stopped firewall.
-        # A firewall is allocated if it has one or more IP configurations.
-        if ($firewall.IpConfigurations) {
-            Add-LogMessage -Level InfoSuccess "Firewall '$Name' is already running."
-        } else {
-            try {
-                Add-LogMessage -Level Info "[ ] Starting firewall '$Name'..."
-                $firewall.Allocate($virtualNetwork, $publicIP)
-                $firewall = Set-AzFirewall -AzureFirewall $firewall -ErrorAction Stop
-                Add-LogMessage -Level Success "Firewall '$Name' successfully started."
-            } catch {
-                Add-LogMessage -Level Fatal "Failed to (re)start firewall '$Name'" -Exception $_.Exception
-            }
-        }
-    }
-    return $firewall
-}
-Export-ModuleMember -Function Start-Firewall
-
-
-# Ensure Firewall is deallocated
-# ------------------------------
-function Stop-Firewall {
-    param(
-        [Parameter(Mandatory = $true, HelpMessage = "Name of Firewall")]
-        [string]$Name,
-        [Parameter(Mandatory = $true, HelpMessage = "Name of Firewall resource group")]
-        [string]$ResourceGroupName,
-        [Parameter(Mandatory = $false, HelpMessage = "Submit request to stop but don't wait for completion.")]
-        [switch]$NoWait
-    )
-    Add-LogMessage -Level Info "Ensuring that firewall '$Name' is deallocated..."
-    $firewall = Get-AzFirewall -Name $Name -ResourceGroupName $ResourceGroupName -ErrorVariable notExists -ErrorAction SilentlyContinue
-    if (-not $firewall) {
-        Add-LogMessage -Level Fatal "Firewall '$Name' does not exist."
-        Exit 1
-    }
-    # At this point we either have a running firewall or a stopped firewall.
-    # A firewall is allocated if it has one or more IP configurations.
-    $firewallAllocated = ($firewall.IpConfigurations.Length -ge 1)
-    if (-not $firewallAllocated) {
-        Add-LogMessage -Level InfoSuccess "Firewall '$Name' is already deallocated."
-    } else {
-        Add-LogMessage -Level Info "[ ] Deallocating firewall '$Name'..."
-        $firewall.Deallocate()
-        $firewall = Set-AzFirewall -AzureFirewall $firewall -AsJob:$NoWait -ErrorAction Stop
-        if ($NoWait) {
-            Add-LogMessage -Level Success "Request to deallocate firewall '$Name' accepted."
-        } else {
-            Add-LogMessage -Level Success "Firewall '$Name' successfully deallocated."
- } - $firewall = Get-AzFirewall -Name $Name -ResourceGroupName $ResourceGroupName -ErrorVariable notExists -ErrorAction SilentlyContinue - } - return $firewall -} -Export-ModuleMember -Function Stop-Firewall - - -# Update NSG rule to match a given configuration -# ---------------------------------------------- -function Update-NetworkSecurityGroupRule { - param( - [Parameter(Mandatory = $false, HelpMessage = "Rule access type")] - [ValidateSet("Allow", "Deny")] - [string]$Access = $null, - [Parameter(Mandatory = $false, HelpMessage = "Rule destination address prefix")] - [string[]]$DestinationAddressPrefix = $null, - [Parameter(Mandatory = $false, HelpMessage = "Rule destination port range")] - [string[]]$DestinationPortRange = $null, - [Parameter(Mandatory = $false, HelpMessage = "Rule direction")] - [ValidateSet("Inbound", "Outbound")] - [string]$Direction = $null, - [Parameter(Mandatory = $true, HelpMessage = "Name of NSG rule to update")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "NSG that this rule belongs to")] - [Microsoft.Azure.Commands.Network.Models.PSNetworkSecurityGroup]$NetworkSecurityGroup, - [Parameter(Mandatory = $false, HelpMessage = "Rule Priority")] - [int]$Priority = $null, - [Parameter(Mandatory = $false, HelpMessage = "Rule protocol")] - [string]$Protocol = $null, - [Parameter(Mandatory = $false, HelpMessage = "Rule source address prefix")] - [string[]]$SourceAddressPrefix = $null, - [Parameter(Mandatory = $false, HelpMessage = "Rule source port range")] - [string[]]$SourcePortRange = $null - ) - # Load any unspecified parameters from the existing rule - try { - $ruleBefore = Get-AzNetworkSecurityRuleConfig -Name $Name -NetworkSecurityGroup $NetworkSecurityGroup - $Description = $ruleBefore.Description - if (-not $Access) { $Access = $ruleBefore.Access } - if (-not $DestinationAddressPrefix) { $DestinationAddressPrefix = $ruleBefore.DestinationAddressPrefix } - if (-not $DestinationPortRange) { $DestinationPortRange = $ruleBefore.DestinationPortRange } - if (-not $Direction) { $Direction = $ruleBefore.Direction } - if (-not $Priority) { $Priority = $ruleBefore.Priority } - if (-not $Protocol) { $Protocol = $ruleBefore.Protocol } - if (-not $SourceAddressPrefix) { $SourceAddressPrefix = $ruleBefore.SourceAddressPrefix } - if (-not $SourcePortRange) { $SourcePortRange = $ruleBefore.SourcePortRange } - # Print the update we're about to make - if ($Direction -eq "Inbound") { - Add-LogMessage -Level Info "[ ] Updating '$Name' rule on '$($NetworkSecurityGroup.Name)' to '$Access' access from '$SourceAddressPrefix'" - } else { - Add-LogMessage -Level Info "[ ] Updating '$Name' rule on '$($NetworkSecurityGroup.Name)' to '$Access' access to '$DestinationAddressPrefix'" - } - # Update rule and NSG (both are required) - $null = Set-AzNetworkSecurityRuleConfig -Access $Access ` - -Description $Description ` - -DestinationAddressPrefix $DestinationAddressPrefix ` - -DestinationPortRange $DestinationPortRange ` - -Direction $Direction ` - -Name $Name ` - -NetworkSecurityGroup $NetworkSecurityGroup ` - -Priority $Priority ` - -Protocol $Protocol ` - -SourceAddressPrefix $SourceAddressPrefix ` - -SourcePortRange $SourcePortRange ` - -ErrorAction Stop | Set-AzNetworkSecurityGroup -ErrorAction Stop - # Apply the rule and validate whether it succeeded - $ruleAfter = Get-AzNetworkSecurityRuleConfig -Name $Name -NetworkSecurityGroup $NetworkSecurityGroup - if (($ruleAfter.Access -eq $Access) -and - ($ruleAfter.Description -eq $Description) -and - 
("$($ruleAfter.DestinationAddressPrefix)" -eq "$DestinationAddressPrefix") -and - ("$($ruleAfter.DestinationPortRange)" -eq "$DestinationPortRange") -and - ($ruleAfter.Direction -eq $Direction) -and - ($ruleAfter.Name -eq $Name) -and - ($ruleAfter.Priority -eq $Priority) -and - ($ruleAfter.Protocol -eq $Protocol) -and - ("$($ruleAfter.SourceAddressPrefix)" -eq "$SourceAddressPrefix") -and - ("$($ruleAfter.SourcePortRange)" -eq "$SourcePortRange")) { - if ($Direction -eq "Inbound") { - Add-LogMessage -Level Success "'$Name' on '$($NetworkSecurityGroup.Name)' will now '$($ruleAfter.Access)' access from '$($ruleAfter.SourceAddressPrefix)'" - } else { - Add-LogMessage -Level Success "'$Name' on '$($NetworkSecurityGroup.Name)' will now '$($ruleAfter.Access)' access to '$($ruleAfter.DestinationAddressPrefix)'" - } - } else { - if ($Direction -eq "Inbound") { - Add-LogMessage -Level Failure "'$Name' on '$($NetworkSecurityGroup.Name)' will now '$($ruleAfter.Access)' access from '$($ruleAfter.SourceAddressPrefix)'" - } else { - Add-LogMessage -Level Failure "'$Name' on '$($NetworkSecurityGroup.Name)' will now '$($ruleAfter.Access)' access to '$($ruleAfter.DestinationAddressPrefix)'" - } - } - # Return the rule - return $ruleAfter - } catch [System.Management.Automation.ValidationMetadataException] { - Add-LogMessage -Level Fatal "Could not find rule '$Name' on NSG '$($NetworkSecurityGroup.Name)'" -Exception $_.Exception - } -} -Export-ModuleMember -Function Update-NetworkSecurityGroupRule - - -# Update subnet and IP address for a VM -# ------------------------------------- -function Update-VMIpAddress { - param( - [Parameter(Mandatory = $true, HelpMessage = "Azure VM object", ParameterSetName = "ByObject", ValueFromPipeline = $true)] - [Microsoft.Azure.Commands.Compute.Models.PSVirtualMachine]$VM, - [Parameter(Mandatory = $true, HelpMessage = "VM name", ParameterSetName = "ByName")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "VM resource group", ParameterSetName = "ByName")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Subnet to join")] - [Microsoft.Azure.Commands.Network.Models.PSChildResource]$Subnet, - [Parameter(Mandatory = $true, HelpMessage = "IP address to switch to")] - [string]$IpAddress - ) - # Get VM if not provided - if (-not $VM) { - $VM = Get-AzVM -Name $Name -ResourceGroup $ResourceGroupName - } - $networkCard = Get-AzNetworkInterface | Where-Object { $_.VirtualMachine.Id -eq $VM.Id } - if ($networkCard.Count -ne 1) { Add-LogMessage -Level Fatal "Found $($networkCard.Count) network cards for $VMName!" } - if ($networkCard.IpConfigurations[0].PrivateIpAddress -eq $IpAddress) { - Add-LogMessage -Level InfoSuccess "IP address for '$($VM.Name)' is already set to '$IpAddress'" - } else { - Add-LogMessage -Level Info "Updating subnet and IP address for '$($VM.Name)'..." - Stop-VM -VM $VM - $networkCard.IpConfigurations[0].Subnet.Id = $Subnet.Id - $networkCard.IpConfigurations[0].PrivateIpAddress = $IpAddress - $null = $networkCard | Set-AzNetworkInterface - # Validate changes - $networkCard = Get-AzNetworkInterface | Where-Object { $_.VirtualMachine.Id -eq $VM.Id } - if ($networkCard.IpConfigurations[0].Subnet.Id -eq $Subnet.Id) { - Add-LogMessage -Level Info "Set '$($VM.Name)' subnet to '$($Subnet.Name)'" - } else { - Add-LogMessage -Level Fatal "Failed to change subnet to '$($Subnet.Name)'!" 
- } - if ($networkCard.IpConfigurations[0].PrivateIpAddress -eq $IpAddress) { - Add-LogMessage -Level Info "Set '$($VM.Name)' IP address to '$IpAddress'" - } else { - Add-LogMessage -Level Fatal "Failed to change IP address to '$IpAddress'!" - } - Start-VM -VM $VM -ForceRestart - } -} -Export-ModuleMember -Function Update-VMIpAddress diff --git a/deployment/common/AzureOperationalInsights.psm1 b/deployment/common/AzureOperationalInsights.psm1 deleted file mode 100644 index b4cf5fa9ef..0000000000 --- a/deployment/common/AzureOperationalInsights.psm1 +++ /dev/null @@ -1,42 +0,0 @@ -Import-Module Az.OperationalInsights -ErrorAction Stop -Import-Module $PSScriptRoot/DataStructures -ErrorAction Stop -Import-Module $PSScriptRoot/Logging -ErrorAction Stop - - -# Create log analytics workspace if it does not exist -# --------------------------------------------------- -function Deploy-LogAnalyticsWorkspace { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of log analytics workspace to deploy")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to deploy into")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Location to deploy into")] - [string]$Location - ) - Add-LogMessage -Level Info "Ensuring that log analytics workspace '$Name' exists..." - $Workspace = Get-AzOperationalInsightsWorkspace -Name $Name -ResourceGroupName $ResourceGroupName -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($notExists) { - Add-LogMessage -Level Info "[ ] Creating log analytics workspace '$Name'" - $Workspace = New-AzOperationalInsightsWorkspace -Name $Name -ResourceGroupName $ResourceGroupName -Location $Location -Sku pergb2018 - if ($?) { - Add-LogMessage -Level Success "Created log analytics workspace '$Name'" - } else { - Add-LogMessage -Level Fatal "Failed to create log analytics workspace '$Name'!" - } - } else { - Add-LogMessage -Level InfoSuccess "Log analytics workspace '$Name' already exists" - } - if (-not $(Get-AzResourceProvider | Where-Object { $_.ProviderNamespace -eq "Microsoft.Insights" })) { - Add-LogMessage -Level Info "[ ] Registering Microsoft.Insights provider in this subscription..." - $null = Register-AzResourceProvider -ProviderNamespace "Microsoft.Insights" - Wait-For -Target "Microsoft.Insights provider to register" -Seconds 300 - if ($(Get-AzResourceProvider | Where-Object { $_.ProviderNamespace -eq "Microsoft.Insights" })) { - Add-LogMessage -Level Success "Successfully registered Microsoft.Insights provider" - } else { - Add-LogMessage -Level Fatal "Failed to register Microsoft.Insights provider!" 
- } - } - return $Workspace -} -Export-ModuleMember -Function Deploy-LogAnalyticsWorkspace diff --git a/deployment/common/AzurePrivateDns.psm1 b/deployment/common/AzurePrivateDns.psm1 deleted file mode 100644 index 7dee76c735..0000000000 --- a/deployment/common/AzurePrivateDns.psm1 +++ /dev/null @@ -1,117 +0,0 @@ -Import-Module Az.PrivateDns -ErrorAction Stop -Import-Module $PSScriptRoot/Logging -ErrorAction Stop - - -# Connect a private DNS zone to an automation account -# --------------------------------------------------- -function Connect-PrivateDnsToVirtualNetwork { - param( - [Parameter(Mandatory = $true, HelpMessage = "Private DNS zone to connect to")] - [Microsoft.Azure.Commands.PrivateDns.Models.PSPrivateDnsZone]$DnsZone, - [Parameter(Mandatory = $true, HelpMessage = "Automation account to connect")] - [Microsoft.Azure.Commands.Network.Models.PSVirtualNetwork]$VirtualNetwork - ) - Add-LogMessage -Level Info "Ensuring that private DNS zone '$($DnsZone.Name)' is connected to virtual network '$($VirtualNetwork.Name)'.." - $link = Get-AzPrivateDnsVirtualNetworkLink -ZoneName $DnsZone.Name -ResourceGroupName $DnsZone.ResourceGroupName -ErrorAction SilentlyContinue | Where-Object { $_.VirtualNetworkId -eq $VirtualNetwork.Id } | Select-Object -First 1 - if (-not $link) { - Add-LogMessage -Level Info "[ ] Connecting private DNS zone '$($DnsZone.Name)' to virtual network '$($VirtualNetwork.Name)'" - try { - $linkName = "link-to-$($VirtualNetwork.Name)".Replace("_", "-").ToLower() - $link = New-AzPrivateDnsVirtualNetworkLink -ZoneName $DnsZone.Name -ResourceGroupName $DnsZone.ResourceGroupName -VirtualNetworkId $VirtualNetwork.Id -Name $linkName -ErrorAction Stop - Add-LogMessage -Level Success "Connected private DNS zone '$($DnsZone.Name)' to virtual network '$($VirtualNetwork.Name)'" - } catch { - Add-LogMessage -Level Fatal "Failed to connect private DNS zone '$($DnsZone.Name)' to virtual network '$($VirtualNetwork.Name)'" -Exception $_.Exception - } - } else { - Add-LogMessage -Level InfoSuccess "Private DNS zone '$($DnsZone.Name)' is already connected to '$($VirtualNetwork.Name)'" - } - return $link -} -Export-ModuleMember -Function Connect-PrivateDnsToVirtualNetwork - - -# Create an Azure Private DNS zone -# -------------------------------- -function Deploy-PrivateDnsZone { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of private DNS zone to deploy")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to deploy into")] - [string]$ResourceGroupName - ) - Add-LogMessage -Level Info "Ensuring that private DNS zone '$Name' exists..." - $zone = Get-AzPrivateDnsZone -Name $Name -ResourceGroupName $ResourceGroupName -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($notExists) { - Add-LogMessage -Level Info "[ ] Creating private DNS zone '$Name'" - $zone = New-AzPrivateDnsZone -Name $Name -ResourceGroupName $ResourceGroupName - if ($?) { - Add-LogMessage -Level Success "Created private DNS zone '$Name'" - } else { - Add-LogMessage -Level Fatal "Failed to create private DNS zone '$Name'!" 
- } - } else { - Add-LogMessage -Level InfoSuccess "Private DNS zone '$Name' already exists" - } - return $zone -} -Export-ModuleMember -Function Deploy-PrivateDnsZone - - -# Create an Azure Private DNS zone -# -------------------------------- -function Deploy-PrivateDnsRecordSet { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of the record to add")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Private IP address to point to")] - [string[]]$PrivateIpAddresses, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to deploy into")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $false, HelpMessage = "Record type")] - [string]$RecordType = "A", - [Parameter(Mandatory = $false, HelpMessage = "TTL in seconds")] - [UInt32]$Ttl = 60, - [Parameter(Mandatory = $true, HelpMessage = "Name of private DNS zone to deploy")] - [string]$ZoneName - ) - Add-LogMessage -Level Info "Ensuring that private DNS record set '$Name' exists..." - $record = Get-AzPrivateDnsRecordSet -Name $Name -ResourceGroupName $ResourceGroupName -RecordType $RecordType -ZoneName $ZoneName -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($notExists) { - Add-LogMessage -Level Info "[ ] Creating private DNS record set '$Name'" - $privateDnsRecords = $PrivateIpAddresses | ForEach-Object { New-AzPrivateDnsRecordConfig -Ipv4Address $_ } - $record = New-AzPrivateDnsRecordSet -Name $Name -ResourceGroupName $ResourceGroupName -RecordType $RecordType -Ttl $Ttl -ZoneName $ZoneName -PrivateDnsRecords $privateDnsRecords - if ($?) { - Add-LogMessage -Level Success "Created private DNS record set '$Name'" - } else { - Add-LogMessage -Level Fatal "Failed to create private DNS record set '$Name'!" - } - } else { - Add-LogMessage -Level InfoSuccess "Private DNS record set '$Name' already exists" - } - return $record -} -Export-ModuleMember -Function Deploy-PrivateDnsRecordSet - - -# Create an Azure Private DNS zone -# -------------------------------- -function Get-PrivateDnsZones { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of subscription to retrieve zones for")] - [string]$SubscriptionName, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to check for zones")] - [string]$ResourceGroupName - ) - $originalContext = Get-AzContext - try { - $null = Set-AzContext -Subscription $SubscriptionName -ErrorAction Stop - $zones = Get-AzPrivateDnsZone -ResourceGroupName $ResourceGroupName - return $zones - } catch { - $null = Set-AzContext -Context $originalContext -ErrorAction Stop - Add-LogMessage -Level Fatal "Failed to load private DNS zones for resource group '$ResourceGroupName' in subscription '$SubscriptionName'!" 
-Exception $_.Exception - } finally { - $null = Set-AzContext -Context $originalContext -ErrorAction SilentlyContinue - } -} -Export-ModuleMember -Function Get-PrivateDnsZones diff --git a/deployment/common/AzureResources.psm1 b/deployment/common/AzureResources.psm1 deleted file mode 100644 index 2e34565c6a..0000000000 --- a/deployment/common/AzureResources.psm1 +++ /dev/null @@ -1,216 +0,0 @@ -Import-Module Az.Resources -ErrorAction Stop -Import-Module $PSScriptRoot/Logging -ErrorAction Stop - - -# Deploy an ARM template and log the output -# ----------------------------------------- -function Deploy-ArmTemplate { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to deploy into")] - [ValidateNotNullOrEmpty()] - [string]$ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Template parameters")] - [ValidateNotNullOrEmpty()] - [System.Collections.Hashtable]$TemplateParameters, - [Parameter(Mandatory = $true, HelpMessage = "Path to template file")] - [ValidateNotNullOrEmpty()] - [string]$TemplatePath - ) - $templateName = Split-Path -Path "$TemplatePath" -LeafBase - # Note we must use inline parameters rather than -TemplateParameterObject in order to support securestring - # Furthermore, using -SkipTemplateParameterPrompt will cause inline parameters to fail - New-AzResourceGroupDeployment -DeploymentDebugLogLevel ResponseContent ` - -ErrorVariable templateErrors ` - -Name $templateName ` - -ResourceGroupName $ResourceGroupName ` - -TemplateFile $TemplatePath ` - -Verbose ` - @TemplateParameters - $result = $? - Add-DeploymentLogMessages -ResourceGroupName $ResourceGroupName -DeploymentName $templateName -ErrorDetails $templateErrors - if ($result) { - Add-LogMessage -Level Success "Template deployment '$templateName' succeeded" - } else { - Add-LogMessage -Level Fatal "Template deployment '$templateName' failed!" - } -} -Export-ModuleMember -Function Deploy-ArmTemplate - - -# Create resource group if it does not exist -# ------------------------------------------ -function Deploy-ResourceGroup { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to deploy")] - $Name, - [Parameter(Mandatory = $true, HelpMessage = "Location of resource group to deploy")] - $Location - ) - Add-LogMessage -Level Info "Ensuring that resource group '$Name' exists..." - $resourceGroup = Get-AzResourceGroup -Name $Name -Location $Location -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($notExists) { - Add-LogMessage -Level Info "[ ] Creating resource group '$Name'" - $resourceGroup = New-AzResourceGroup -Name $Name -Location $Location -Force - if ($?) { - Add-LogMessage -Level Success "Created resource group '$Name'" - } else { - Add-LogMessage -Level Fatal "Failed to create resource group '$Name'!" 
- } - } else { - Add-LogMessage -Level InfoSuccess "Resource group '$Name' already exists" - } - return $resourceGroup -} -Export-ModuleMember -Function Deploy-ResourceGroup - - -# Attach an RBAC role to a principal -# ---------------------------------- -function Deploy-RoleAssignment { - param( - [Parameter(Mandatory = $true, HelpMessage = "ID of object that the role will be granted to")] - [string]$ObjectId, - [Parameter(Mandatory = $true, HelpMessage = "Name of role to be assigned")] - [string]$RoleDefinitionName, - [Parameter(Mandatory = $false, HelpMessage = "Name of resource group to apply the role over")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $false, HelpMessage = "URI of the object to apply the role over")] - [string]$Scope - ) - # Validate arguments - if ([boolean]$ResourceGroupName -eq [boolean]$Scope) { - Add-LogMessage -Level Fatal "Failed to create role assignment, cannot declare both ResourceGroupName and Scope." - } - - # Check if assignment exists - Add-LogMessage -Level Info "Ensuring that role assignment for $ObjectId as $RoleDefinitionName over $($Scope ? $Scope : $ResourceGroupName) exists..." - if ($Scope) { - $Assignment = Get-AzRoleAssignment -ObjectId $ObjectId ` - -RoleDefinitionName $RoleDefinitionName ` - -Scope $Scope ` - -ErrorAction SilentlyContinue - } else { - $Assignment = Get-AzRoleAssignment -ObjectId $ObjectId ` - -RoleDefinitionName $RoleDefinitionName ` - -ResourceGroupName $ResourceGroupName ` - -ErrorAction SilentlyContinue - } - - if ($Assignment) { - Add-LogMessage -Level InfoSuccess "Role assignment '$RoleDefinitionName' over '$($Scope ? $Scope : $ResourceGroupName)' already exists" - } else { - try { - Add-LogMessage -Level Info "[ ] Creating role assignment '$RoleDefinitionName' over '$($Scope ? $Scope : $ResourceGroupName)'..." - if ($Scope) { - $Assignment = New-AzRoleAssignment -ObjectId $ObjectId ` - -RoleDefinitionName $RoleDefinitionName ` - -Scope $Scope ` - -ErrorAction Stop - } else { - $Assignment = New-AzRoleAssignment -ObjectId $ObjectId ` - -RoleDefinitionName $RoleDefinitionName ` - -ResourceGroupName $ResourceGroupName ` - -ErrorAction Stop - } - Add-LogMessage -Level Success "Successfully created role assignment '$RoleDefinitionName' over '$($Scope ? $Scope : $ResourceGroupName)'" - } catch { - Add-LogMessage -Level Fatal "Failed to create role assignment '$RoleDefinitionName' over '$($Scope ? 
$Scope : $ResourceGroupName)'" -Exception $_.Exception - } - } - return $Assignment -} -Export-ModuleMember -Function Deploy-RoleAssignment - - -# Get the resource ID for a named resource -# ---------------------------------------- -function Get-ResourceGroupName { - param( - [Parameter(Mandatory = $true, HelpMessage = "Resource to obtain ID for")] - [System.Object]$ResourceName - ) - return Get-AzResource | Where-Object { $_.Name -eq $ResourceName } | ForEach-Object { $_.ResourceGroupName } | Select-Object -First 1 -} -Export-ModuleMember -Function Get-ResourceGroupName - - -# Get the resource ID for a named resource -# ---------------------------------------- -function Get-ResourceId { - param( - [Parameter(Mandatory = $true, HelpMessage = "Resource to obtain ID for")] - [System.Object]$ResourceName - ) - return Get-AzResource | Where-Object { $_.Name -eq $ResourceName } | ForEach-Object { $_.ResourceId } | Select-Object -First 1 -} -Export-ModuleMember -Function Get-ResourceId - - -# Get the resource ID for a named resource -# ---------------------------------------- -function Get-ResourcesInGroup { - param( - [Parameter(Mandatory = $true, HelpMessage = "Resource group to check for resources")] - [string]$ResourceGroupName - ) - return Get-AzResource | Where-Object { $_.ResourceGroupName -eq $ResourceGroupName } -} -Export-ModuleMember -Function Get-ResourcesInGroup - - -# Remove resource groups and the resources they contain -# ----------------------------------------------------- -function Remove-AllResourceGroups { - param( - [Parameter(Mandatory = $false, HelpMessage = "Maximum number of iterations to attempt")] - [int]$MaxAttempts = 10, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to remove")] - [string[]]$ResourceGroupNames - ) - for ($i = 0; $i -lt $MaxAttempts; $i++) { - try { - $ResourceGroups = Get-AzResourceGroup -ErrorAction Stop | Where-Object { $ResourceGroupNames.Contains($_.ResourceGroupName) } - if (-not $ResourceGroups.Count) { return } - Add-LogMessage -Level Info "Found $($ResourceGroups.Count) resource group(s) to remove..." - $InitialNames = $ResourceGroups | ForEach-Object { $_.ResourceGroupName } - # Schedule removal of existing resource groups then wait for a minute - $ResourceGroups | ForEach-Object { Remove-ResourceGroup -Name $_.ResourceGroupName -NoWait } - Start-Sleep 60 - # Check for current resource groups and output any successfully removed resource groups - $FinalNames = Get-AzResourceGroup -ErrorAction Stop | ForEach-Object { $_.ResourceGroupName } | Where-Object { $ResourceGroupNames.Contains($_) } - $InitialNames | Where-Object { -not $FinalNames.Contains($_) } | ForEach-Object { - Add-LogMessage -Level Success "Removed resource group $_" - } - } catch { - $lastException = $_.Exception - } - } - Add-LogMessage -Level Fatal "Failed to remove all requested resource groups!" -Exception $lastException -} -Export-ModuleMember -Function Remove-AllResourceGroups - - -# Remove a resource group if it exists -# ------------------------------------ -function Remove-ResourceGroup { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to remove")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Do not wait for the removal to complete")] - [switch]$NoWait - ) - Add-LogMessage -Level Info "Attempting to remove resource group '$Name'..." 
-    try {
-        $ResourceGroup = Get-AzResourceGroup -ResourceGroupName $Name -ErrorAction Stop
-        if ($NoWait.IsPresent) {
-            $null = Get-AzResource -ErrorAction SilentlyContinue -ErrorVariable ignored | Where-Object { $_.ResourceGroupName -eq $Name } | Remove-AzResource -AsJob -ErrorAction SilentlyContinue -ErrorVariable ignored
-            $null = Remove-AzResourceGroup -ResourceId $ResourceGroup.ResourceId -Force -Confirm:$False -AsJob -ErrorAction SilentlyContinue -ErrorVariable ignored
-        } else {
-            $null = Remove-AzResourceGroup -ResourceId $ResourceGroup.ResourceId -Force -Confirm:$False -ErrorAction Stop
-            Add-LogMessage -Level Success "Removing resource group '$Name' succeeded"
-        }
-    } catch {
-        Add-LogMessage -Level Warning "Removing resource group '$Name' failed!" -Exception $_.Exception
-    }
-}
-Export-ModuleMember -Function Remove-ResourceGroup
diff --git a/deployment/common/AzureStorage.psm1 b/deployment/common/AzureStorage.psm1
deleted file mode 100644
index d3ac7e0583..0000000000
--- a/deployment/common/AzureStorage.psm1
+++ /dev/null
@@ -1,616 +0,0 @@
-Import-Module Az.Network -ErrorAction Stop
-Import-Module Az.Storage -ErrorAction Stop
-Import-Module $PSScriptRoot/AzureCompute -ErrorAction Stop
-Import-Module $PSScriptRoot/AzureNetwork -ErrorAction Stop
-Import-Module $PSScriptRoot/Logging -ErrorAction Stop
-
-
-# Clear contents of storage container if it exists
-# ------------------------------------------------
-function Clear-StorageContainer {
-    param(
-        [Parameter(Mandatory = $true, HelpMessage = "Name of storage container")]
-        [string]$Name,
-        [Parameter(Mandatory = $true, HelpMessage = "Storage account that the container belongs to")]
-        [Microsoft.Azure.Commands.Management.Storage.Models.PSStorageAccount]$StorageAccount
-    )
-    Add-LogMessage -Level Info "Clearing contents of storage container '$Name'..."
-    try {
-        $StorageContainer = Get-AzStorageContainer -Name $Name -Context $StorageAccount.Context -ErrorAction Stop
-        $blobs = @(Get-AzStorageBlob -Container $StorageContainer.Name -Context $StorageAccount.Context)
-        $numBlobs = $blobs.Length
-        if ($numBlobs -gt 0) {
-            Add-LogMessage -Level Info "[ ] Deleting $numBlobs blobs already in container '$($StorageContainer.Name)'..."
-            $blobs | ForEach-Object { Remove-AzStorageBlob -Blob $_.Name -Container $StorageContainer.Name -Context $StorageAccount.Context -Force }
-            while ($numBlobs -gt 0) {
-                Start-Sleep -Seconds 5
-                $numBlobs = (Get-AzStorageBlob -Container $StorageContainer.Name -Context $StorageAccount.Context).Length
-            }
-            if ($?) {
-                Add-LogMessage -Level Success "Blob deletion succeeded for storage container '$Name' in storage account '$($StorageAccount.StorageAccountName)'"
-            } else {
-                Add-LogMessage -Level Fatal "Blob deletion failed for storage container '$Name' in storage account '$($StorageAccount.StorageAccountName)'!"
-            }
-        }
-    } catch {
-        Add-LogMessage -Level InfoSuccess "Storage container '$Name' does not exist in storage account '$($StorageAccount.StorageAccountName)'"
-    }
-}
-Export-ModuleMember -Function Clear-StorageContainer
-
-
-# Generate a new SAS policy
-# Note that there is a limit of 5 policies for a given storage account/container
-# ------------------------------------------------------------------------------
-function Deploy-SasAccessPolicy {
-    param(
-        [Parameter(Mandatory = $true, HelpMessage = "Policy name")]
-        [string]$Name,
-        [Parameter(Mandatory = $true, HelpMessage = "Policy permissions")]
-        [string]$Permission,
-        [Parameter(Mandatory = $true, HelpMessage = "Storage account")]
-        [Microsoft.Azure.Commands.Management.Storage.Models.PSStorageAccount]$StorageAccount,
-        [Parameter(Mandatory = $false, ParameterSetName = "ByContainerName", HelpMessage = "Container name")]
-        [string]$ContainerName,
-        [Parameter(Mandatory = $false, ParameterSetName = "ByShareName", HelpMessage = "Share name")]
-        [string]$ShareName,
-        [Parameter(Mandatory = $false, HelpMessage = "Validity in years")]
-        [int]$ValidityYears = 20,
-        [Parameter(Mandatory = $false, HelpMessage = "Force generating a new policy")]
-        [switch]$Force
-    )
-    $Identifier = $ContainerName ? "container '$ContainerName'" : $ShareName ? "share '$ShareName'" : ""
-    $PolicyName = "${Identifier}${Name}".Replace(" ", "").Replace("'", "").ToLower()
-    Add-LogMessage -Level Info "Ensuring that SAS policy '$PolicyName' exists for $Identifier in '$($StorageAccount.StorageAccountName)'..."
-    if ($ContainerName) {
-        $policy = Get-AzStorageContainerStoredAccessPolicy -Container $ContainerName -Policy $PolicyName -Context $StorageAccount.Context -ErrorAction SilentlyContinue
-    } elseif ($ShareName) {
-        $policy = Get-AzStorageShareStoredAccessPolicy -ShareName $ShareName -Policy $PolicyName -Context $StorageAccount.Context -ErrorAction SilentlyContinue
-    }
-    if ($policy -and -not $Force) {
-        # Return the existing policy unless we have been asked to overwrite it
-        Add-LogMessage -Level InfoSuccess "Found existing SAS policy '$PolicyName' for $Identifier in '$($StorageAccount.StorageAccountName)'"
-        return $policy
-    }
-    if ($Force -and -not $policy) {
-        Add-LogMessage -Level Fatal "No existing SAS policy '$PolicyName' for $Identifier in '$($StorageAccount.StorageAccountName)'"
-    }
-
-    if (-not $Force) {
-        Add-LogMessage -Level Info "[ ] Creating new SAS policy '$PolicyName' for $Identifier in '$($StorageAccount.StorageAccountName)'"
-    } else {
-        Add-LogMessage -Level Info "[ ] Updating SAS policy '$PolicyName' for $Identifier in '$($StorageAccount.StorageAccountName)'"
-    }
-    $StartTime = (Get-Date).AddMinutes(-1) # allow for possible clock-skew between different systems
-    $ExpiryTime = $StartTime.AddYears($ValidityYears)
-    $success = $false
-    if ($ContainerName) {
-        if (-not $Force) {
-            $null = New-AzStorageContainerStoredAccessPolicy -Container $ContainerName -Policy $PolicyName -Context $StorageAccount.Context -Permission $Permission -StartTime $StartTime -ExpiryTime $ExpiryTime
-        } else {
-            $null = Set-AzStorageContainerStoredAccessPolicy -Container $ContainerName -Policy $PolicyName -Context $StorageAccount.Context -Permission $Permission -StartTime $StartTime -ExpiryTime $ExpiryTime
-        }
-        $policy = Get-AzStorageContainerStoredAccessPolicy -Container $ContainerName -Policy $PolicyName -Context $StorageAccount.Context
-        $success = $?
- } elseif ($ShareName) { - if (-not $Force) { - $null = New-AzStorageShareStoredAccessPolicy -ShareName $ShareName -Policy $PolicyName -Context $StorageAccount.Context -Permission $Permission -StartTime $StartTime -ExpiryTime $ExpiryTime - } else { - $null = Set-AzStorageShareStoredAccessPolicy -ShareName $ShareName -Policy $PolicyName -Context $StorageAccount.Context -Permission $Permission -StartTime $StartTime -ExpiryTime $ExpiryTime - } - $policy = Get-AzStorageShareStoredAccessPolicy -ShareName $ShareName -Policy $PolicyName -Context $StorageAccount.Context - $success = $? - } - if ($success) { - Add-LogMessage -Level Success "Created new SAS policy '$PolicyName' for $Identifier in '$($StorageAccount.StorageAccountName)'" - } else { - Add-LogMessage -Level Fatal "Failed to create new SAS policy '$PolicyName' for $Identifier in '$($StorageAccount.StorageAccountName)'!" - } - return $policy -} -Export-ModuleMember -Function Deploy-SasAccessPolicy - - -# Create storage account if it does not exist -# ------------------------------------------ -function Deploy-StorageAccount { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of storage account to deploy")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to deploy into")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Location of resource group to deploy into")] - [string]$Location, - [Parameter(Mandatory = $false, HelpMessage = "SKU name of the storage account to deploy")] - [string]$SkuName = "Standard_GRS", - [Parameter(Mandatory = $false, HelpMessage = "Kind of storage account to deploy")] - [ValidateSet("StorageV2", "BlobStorage", "BlockBlobStorage", "FileStorage")] - [string]$Kind = "StorageV2", - [Parameter(Mandatory = $false, HelpMessage = "Access tier of the Storage account. Only used if 'Kind' is set to 'BlobStorage'")] - [string]$AccessTier = "Hot", - [Parameter(Mandatory = $false, HelpMessage = "Allow traffic over http as well as https (required for NFS file shares)")] - [switch]$AllowHttpTraffic - ) - Add-LogMessage -Level Info "Ensuring that storage account '$Name' exists in '$ResourceGroupName'..." - $storageAccount = Get-AzStorageAccount -Name $Name -ResourceGroupName $ResourceGroupName -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($notExists) { - Add-LogMessage -Level Info "[ ] Creating storage account '$Name'" - $params = @{} - if ($Kind -eq "BlobStorage") { $params["AccessTier"] = $AccessTier } - if ($AllowHttpTraffic) { - $params["EnableHttpsTrafficOnly"] = $false - Add-LogMessage -Level Warning "Storage account '$Name' will be deployed with EnableHttpsTrafficOnly disabled. Note that this can take up to 15 minutes to complete." - } - try { - $storageAccount = New-AzStorageAccount -Name $Name -ResourceGroupName $ResourceGroupName -Location $Location -SkuName $SkuName -Kind $Kind @params -ErrorAction Stop - Add-LogMessage -Level Success "Created storage account '$Name'" - } catch { - Add-LogMessage -Level Fatal "Failed to create storage account '$Name'!" 
-Exception $_.Exception - } - } else { - Add-LogMessage -Level InfoSuccess "Storage account '$Name' already exists" - } - return $storageAccount -} -Export-ModuleMember -Function Deploy-StorageAccount - - -# Create storage account private endpoint if it does not exist -# ------------------------------------------------------------ -function Deploy-StorageAccountEndpoint { - param( - [Parameter(Mandatory = $true, HelpMessage = "Storage account to generate a private endpoint for")] - [Microsoft.Azure.Commands.Management.Storage.Models.PSStorageAccount]$StorageAccount, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to deploy the endpoint into")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Subnet to deploy the endpoint into")] - [Microsoft.Azure.Commands.Network.Models.PSSubnet]$Subnet, - [Parameter(Mandatory = $true, HelpMessage = "Type of storage to connect to (Blob, File or Default)")] - [ValidateSet("Blob", "File", "Default")] - [string]$StorageType, - [Parameter(Mandatory = $true, HelpMessage = "Location to deploy the endpoint into")] - [string]$Location - ) - # Allow a default if we're using a storage account that is only compatible with one storage type - if ($StorageType -eq "Default") { - if ($StorageAccount.Kind -eq "BlobStorage") { $StorageType = "Blob" } - elseif ($StorageAccount.Kind -eq "BlockBlobStorage") { $StorageType = "Blob" } - elseif ($StorageAccount.Kind -eq "FileStorage") { $StorageType = "File" } - elseif ($StorageAccount.Kind -eq "StorageV2") { $StorageType = "Blob" } # default to blob storage for StorageV2 - else { Add-LogMessage -Level Fatal "Storage type must be specified as 'Blob' or 'File' for $($StorageAccount.Kind) storage accounts" } - } - # Validate that the storage type is compatible with this storage account - if ((($StorageAccount.Kind -eq "BlobStorage") -and ($StorageType -ne "Blob")) -or - (($StorageAccount.Kind -eq "BlockBlobStorage") -and ($StorageType -ne "Blob")) -or - (($StorageAccount.Kind -eq "FileStorage") -and ($StorageType -ne "File"))) { - Add-LogMessage -Level Fatal "Storage type '$StorageType' is not compatible with '$($StorageAccount.StorageAccountName)' which uses '$($StorageAccount.Kind)'" - } - # Disable private endpoint network policies on the subnet - # See here for further information: https://docs.microsoft.com/en-us/azure/private-link/disable-private-endpoint-network-policy - # Note that this means that NSG rules will *not* apply to the private endpoint - if ($Subnet.PrivateEndpointNetworkPolicies -ne "Disabled") { - Add-LogMessage -Level Info "[ ] Disabling private endpoint network policies on '$($Subnet.Name)'..." - $virtualNetwork = Get-VirtualNetworkFromSubnet -Subnet $Subnet - ($virtualNetwork | Select-Object -ExpandProperty Subnets | Where-Object { $_.Name -eq $Subnet.Name }).PrivateEndpointNetworkPolicies = "Disabled" - $virtualNetwork | Set-AzVirtualNetwork - if ($?) { - Add-LogMessage -Level Success "Disabled private endpoint network policies on '$($Subnet.Name)'" - } else { - Add-LogMessage -Level Fatal "Failed to disable private endpoint network policies on '$($Subnet.Name)'!" - } - } - # Ensure that the private endpoint exists - $privateEndpointName = "$($StorageAccount.StorageAccountName)-endpoint" - Add-LogMessage -Level Info "Ensuring that private endpoint '$privateEndpointName' for storage account '$($StorageAccount.StorageAccountName)' exists..." 
- try { - $privateEndpoint = Get-AzPrivateEndpoint -Name $privateEndpointName -ResourceGroupName $ResourceGroupName -ErrorAction Stop - if ($privateEndpoint.PrivateLinkServiceConnections.PrivateLinkServiceConnectionState.Status -eq "Disconnected") { - $null = Remove-AzPrivateEndpoint -Name $privateEndpointName -ResourceGroupName $ResourceGroupName -Force - Start-Sleep 10 - $privateEndpoint = Get-AzPrivateEndpoint -Name $privateEndpointName -ResourceGroupName $ResourceGroupName -ErrorAction Stop - } - Add-LogMessage -Level InfoSuccess "Private endpoint '$privateEndpointName' already exists for storage account '$($StorageAccount.StorageAccountName)'" - } catch [Microsoft.Azure.Commands.Network.Common.NetworkCloudException] { - try { - Add-LogMessage -Level Info "[ ] Creating private endpoint '$privateEndpointName' for storage account '$($StorageAccount.StorageAccountName)'" - $privateEndpointConnection = New-AzPrivateLinkServiceConnection -Name "${privateEndpointName}ServiceConnection" -PrivateLinkServiceId $StorageAccount.Id -GroupId $StorageType -ErrorAction Stop - $privateEndpoint = New-AzPrivateEndpoint -Name $privateEndpointName -ResourceGroupName $ResourceGroupName -Location $Location -Subnet $Subnet -PrivateLinkServiceConnection $privateEndpointConnection -ErrorAction Stop - Add-LogMessage -Level Success "Created private endpoint '$privateEndpointName' for storage account '$($StorageAccount.StorageAccountName)'" - } catch { - Add-LogMessage -Level Fatal "Failed to create private endpoint '$privateEndpointName' for storage account '$($StorageAccount.StorageAccountName)'!" -Exception $_.Exception - } - } - return $privateEndpoint -} -Export-ModuleMember -Function Deploy-StorageAccountEndpoint - - -# Create storage container if it does not exist -# --------------------------------------------- -function Deploy-StorageContainer { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of storage container to deploy")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Storage account to deploy into")] - [Microsoft.Azure.Commands.Management.Storage.Models.PSStorageAccount]$StorageAccount - ) - Add-LogMessage -Level Info "Ensuring that storage container '$Name' exists..." - $storageContainer = Get-AzStorageContainer -Name $Name -Context $StorageAccount.Context -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($notExists) { - Add-LogMessage -Level Info "[ ] Creating storage container '$Name' in storage account '$($StorageAccount.StorageAccountName)'" - try { - $storageContainer = New-AzStorageContainer -Name $Name -Context $StorageAccount.Context -ErrorAction Stop - Add-LogMessage -Level Success "Created storage container '$Name' in storage account '$($StorageAccount.StorageAccountName)'" - } catch { - # Sometimes the storage container exists but Powershell does not recognise this until it attempts to create it - $storageContainer = Get-AzStorageContainer -Name $Name -Context $StorageAccount.Context -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($notExists) { - Add-LogMessage -Level Fatal "Failed to create storage container '$Name' in storage account '$($StorageAccount.StorageAccountName)'!" 
-Exception $_.Exception - } else { - Add-LogMessage -Level InfoSuccess "Storage container '$Name' already exists in storage account '$($StorageAccount.StorageAccountName)'" - } - } - } else { - Add-LogMessage -Level InfoSuccess "Storage container '$Name' already exists in storage account '$($StorageAccount.StorageAccountName)'" - } - return $storageContainer -} -Export-ModuleMember -Function Deploy-StorageContainer - - -# Create storage share if it does not exist -# ----------------------------------------- -function Deploy-StorageShare { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of storage share to deploy")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Storage account to deploy into")] - [Microsoft.Azure.Commands.Management.Storage.Models.PSStorageAccount]$StorageAccount - ) - Add-LogMessage -Level Info "Ensuring that storage share '$Name' exists..." - $storageShare = Get-AzStorageShare -Name $Name -Context $StorageAccount.Context -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($notExists) { - Add-LogMessage -Level Info "[ ] Creating storage share '$Name' in storage account '$($StorageAccount.StorageAccountName)'" - $storageShare = New-AzStorageShare -Name $Name -Context $StorageAccount.Context - if ($?) { - Add-LogMessage -Level Success "Created storage share '$Name' in storage account '$($StorageAccount.StorageAccountName)'" - } else { - Add-LogMessage -Level Fatal "Failed to create storage share '$Name' in storage account '$($StorageAccount.StorageAccountName)'!" - } - } else { - Add-LogMessage -Level InfoSuccess "Storage share '$Name' already exists in storage account '$($StorageAccount.StorageAccountName)'" - } - return $storageShare -} -Export-ModuleMember -Function Deploy-StorageShare - - -# Create storage share if it does not exist -# ----------------------------------------- -function Deploy-StorageNfsShare { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of storage share to deploy")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Storage account to deploy into")] - [Microsoft.Azure.Commands.Management.Storage.Models.PSStorageAccount]$StorageAccount, - [Parameter(Mandatory = $false, HelpMessage = "Size of NFS quota in GB")] - [int]$QuotaGiB - ) - Add-LogMessage -Level Info "Ensuring that NFS storage share '$Name' exists..." - $storageShare = Get-AzStorageShare -Name $Name -Context $StorageAccount.Context -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($notExists) { - Add-LogMessage -Level Info "[ ] Creating NFS storage share '$Name' in storage account '$($StorageAccount.StorageAccountName)'" - $params = @{} - if ($QuotaGiB) { $params["QuotaGiB"] = $QuotaGiB } - New-AzRmStorageShare -Name $Name ` - -ResourceGroupName $StorageAccount.ResourceGroupName ` - -StorageAccountName $StorageAccount.StorageAccountName ` - -EnabledProtocol "NFS" ` - -RootSquash "NoRootSquash" ` - @params - $storageShare = Get-AzStorageShare -Name $Name -Context $StorageAccount.Context -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($storageShare) { - Add-LogMessage -Level Success "Created NFS storage share '$Name' in storage account '$($StorageAccount.StorageAccountName)'" - } else { - Add-LogMessage -Level Fatal "Failed to create NFS storage share '$Name' in storage account '$($StorageAccount.StorageAccountName)'!" 
-        }
-    } else {
-        Add-LogMessage -Level InfoSuccess "NFS storage share '$Name' already exists in storage account '$($StorageAccount.StorageAccountName)'"
-    }
-    return $storageShare
-}
-Export-ModuleMember -Function Deploy-StorageNfsShare
-
-
-# Ensure that storage receptacle (container, share or NFS share) exists
-# ----------------------------------------------------------------------
-function Deploy-StorageReceptacle {
-    param(
-        [Parameter(Mandatory = $true, HelpMessage = "Name of storage receptacle to deploy")]
-        [string]$Name,
-        [Parameter(Mandatory = $true, HelpMessage = "Storage account to deploy into")]
-        [Microsoft.Azure.Commands.Management.Storage.Models.PSStorageAccount]$StorageAccount,
-        [Parameter(Mandatory = $true, HelpMessage = "Type of storage receptacle to create (Share, Container or NfsShare)")]
-        [ValidateSet("Share", "Container", "NfsShare")]
-        [string]$StorageType
-    )
-    if ($StorageType -eq "Share") {
-        return Deploy-StorageShare -Name $Name -StorageAccount $StorageAccount
-    } elseif ($StorageType -eq "Container") {
-        return Deploy-StorageContainer -Name $Name -StorageAccount $StorageAccount
-    } elseif ($StorageType -eq "NfsShare") {
-        return Deploy-StorageNfsShare -Name $Name -StorageAccount $StorageAccount
-    }
-    Add-LogMessage -Level Fatal "Unable to create a storage receptacle of type '$StorageType'!"
-}
-Export-ModuleMember -Function Deploy-StorageReceptacle
-
-
-# Get storage account possibly in another subscription
-# ----------------------------------------------------
-function Get-StorageAccount {
-    # Note that in order to use @Args we must not include any [Parameter(...)] information
-    param(
-        [string]$SubscriptionName
-    )
-    $originalContext = Get-AzContext
-    $null = Set-AzContext -SubscriptionId $SubscriptionName -ErrorAction Stop
-    $StorageAccount = Get-AzStorageAccount @Args
-    $null = Set-AzContext -Context $originalContext -ErrorAction Stop
-    return $StorageAccount
-}
-Export-ModuleMember -Function Get-StorageAccount
-
-
-# Get all available endpoints for a given storage account
-# -------------------------------------------------------
-function Get-StorageAccountEndpoints {
-    param(
-        [Parameter(Mandatory = $true, HelpMessage = "Storage account to get endpoints for")]
-        [Microsoft.Azure.Commands.Management.Storage.Models.PSStorageAccount]$StorageAccount
-    )
-    return @(
-        @($StorageAccount.PrimaryEndpoints.Blob,
-          $StorageAccount.PrimaryEndpoints.Queue,
-          $StorageAccount.PrimaryEndpoints.Table,
-          $StorageAccount.PrimaryEndpoints.File,
-          $StorageAccount.PrimaryEndpoints.Web,
-          $StorageAccount.PrimaryEndpoints.Dfs) | Where-Object { $_ }
-    )
-}
-Export-ModuleMember -Function Get-StorageAccountEndpoints
-
-
-# Generate a new SAS token
-# ------------------------
-function New-StorageAccountSasToken {
-    param(
-        [Parameter(Mandatory = $true, HelpMessage = "Enter subscription name")]
-        [string]$SubscriptionName,
-        [Parameter(Mandatory = $true, HelpMessage = "Enter storage account resource group")]
-        [string]$ResourceGroup,
-        [Parameter(Mandatory = $true, HelpMessage = "Enter storage account name")]
-        [string]$AccountName,
-        [Parameter(Mandatory = $true, HelpMessage = "Enter service(s): one or more of (Blob, File, Table, Queue)")]
-        [string[]]$Service,
-        [Parameter(Mandatory = $true, HelpMessage = "Enter resource type(s): one or more of (Service, Container, Object)")]
-        [string[]]$ResourceType,
-        [Parameter(Mandatory = $true, HelpMessage = "Enter permission string")]
-        [string]$Permission,
-        [Parameter(Mandatory = $false, HelpMessage = "Enter validity in hours")]
-        [int]$ValidityHours = 2
-    )
-
-    # Temporarily switch to storage account subscription
-    $originalContext = Get-AzContext
-    $null = Set-AzContext -Subscription $SubscriptionName -ErrorAction Stop
-
-    # Generate SAS token
-    $accountKey = (Get-AzStorageAccountKey -ResourceGroupName $ResourceGroup -AccountName $AccountName).Value[0]
-    $accountContext = (New-AzStorageContext -StorageAccountName $AccountName -StorageAccountKey $accountKey)
-    $expiryTime = ((Get-Date) + (New-TimeSpan -Hours $ValidityHours))
-    $sasToken = (New-AzStorageAccountSASToken -Service $Service -ResourceType $ResourceType -Permission $Permission -ExpiryTime $expiryTime -Context $accountContext)
-
-    # Switch back to previous subscription
-    $null = Set-AzContext -Context $originalContext -ErrorAction Stop
-    return $sasToken
-}
-Export-ModuleMember -Function New-StorageAccountSasToken
-
-
-# Generate a new read-only SAS token
-# ----------------------------------
-function New-ReadOnlyStorageAccountSasToken {
-    param(
-        [Parameter(Mandatory = $true, HelpMessage = "Enter subscription name")]
-        [string]$SubscriptionName,
-        [Parameter(Mandatory = $true, HelpMessage = "Enter storage account resource group")]
-        [string]$ResourceGroup,
-        [Parameter(Mandatory = $true, HelpMessage = "Enter storage account name")]
-        [string]$AccountName
-    )
-    return New-StorageAccountSasToken -SubscriptionName "$SubscriptionName" `
-                                      -ResourceGroup "$ResourceGroup" `
-                                      -AccountName "$AccountName" `
-                                      -Service Blob, File `
-                                      -ResourceType Service, Container, Object `
-                                      -Permission "rl"
-}
-Export-ModuleMember -Function New-ReadOnlyStorageAccountSasToken
-
-
-# Generate a new SAS token from an existing access policy
-# --------------------------------------------------------
-function New-StorageReceptacleSasToken {
-    param(
-        [Parameter(Mandatory = $true, HelpMessage = "Storage account")]
-        [Microsoft.Azure.Commands.Management.Storage.Models.PSStorageAccount]$StorageAccount,
-        [Parameter(Mandatory = $true, HelpMessage = "Name of an access policy that is valid for this storage account")]
-        [string]$PolicyName,
-        [Parameter(Mandatory = $false, ParameterSetName = "ByContainerName", HelpMessage = "Container name")]
-        [string]$ContainerName,
-        [Parameter(Mandatory = $false, ParameterSetName = "ByShareName", HelpMessage = "Share name")]
-        [string]$ShareName
-    )
-    $identifier = $ContainerName ? "container '$ContainerName'" : $ShareName ? "share '$ShareName'" : ""
-    Add-LogMessage -Level Info "Generating new SAS token for $identifier in '$($StorageAccount.StorageAccountName)'..."
-    if ($ContainerName) {
-        $SasToken = New-AzStorageContainerSASToken -Name $ContainerName -Policy $PolicyName -Context $StorageAccount.Context
-        $expiryTime = (Get-AzStorageContainerStoredAccessPolicy -Container $ContainerName -Policy $PolicyName -Context $StorageAccount.Context).ExpiryTime
-    } elseif ($ShareName) {
-        $SasToken = New-AzStorageShareSASToken -ShareName $ShareName -Policy $PolicyName -Context $StorageAccount.Context
-        $expiryTime = (Get-AzStorageShareStoredAccessPolicy -ShareName $ShareName -Policy $PolicyName -Context $StorageAccount.Context).ExpiryTime
-    }
-    if ($?) {
-        Add-LogMessage -Level Success "Created new SAS token for $identifier in '$($StorageAccount.StorageAccountName)' valid until $($expiryTime.UtcDateTime.ToString('yyyy-MM-dd'))"
-    } else {
-        Add-LogMessage -Level Fatal "Failed to create new SAS token for $identifier in '$($StorageAccount.StorageAccountName)'!"
- } - return $SasToken -} -Export-ModuleMember -Function New-StorageReceptacleSasToken - - -# Send local files to a Linux VM
# ------------------------------ -function Send-FilesToLinuxVM { - param( - [Parameter(Mandatory = $true, HelpMessage = "Path to the local directory to upload")] - [string]$LocalDirectory, - [Parameter(Mandatory = $true, HelpMessage = "Remote directory to copy the files into")] - [string]$RemoteDirectory, - [Parameter(Mandatory = $true, HelpMessage = "Blob storage account to use for the transfer")] - [Microsoft.Azure.Commands.Management.Storage.Models.PSStorageAccount]$BlobStorageAccount, - [Parameter(Mandatory = $true, HelpMessage = "Name of the VM to copy files onto")] - [string]$VMName, - [Parameter(Mandatory = $true, HelpMessage = "Resource group of the VM to copy files onto")] - [string]$VMResourceGroupName - ) - $originalContext = Get-AzContext - $storageAccountSubscription = $BlobStorageAccount.Id.Split("/")[2] - $ResolvedPath = Get-Item -Path $LocalDirectory - - # Zip files from the local directory - try { - $zipFileDir = New-Item -ItemType Directory -Path (Join-Path ([System.IO.Path]::GetTempPath()) ([Guid]::NewGuid().ToString())) - $zipFileContainerName = [Guid]::NewGuid().ToString() - $zipFileName = "${zipFileContainerName}.zip" - $zipFilePath = Join-Path $zipFileDir $zipFileName - if (Test-Path $zipFilePath) { Remove-Item $zipFilePath } - Add-LogMessage -Level Info "[ ] Creating zip file at $zipFilePath..." - Compress-Archive -CompressionLevel NoCompression -Path $ResolvedPath -DestinationPath $zipFilePath -ErrorAction Stop - Add-LogMessage -Level Success "Zip file creation succeeded" - } catch { - $null = Remove-Item -Path $zipFileDir -Recurse -Force -ErrorAction SilentlyContinue - Add-LogMessage -Level Fatal "Zip file creation failed!" - } - - # Upload the zipfile to blob storage - Add-LogMessage -Level Info "[ ] Uploading zip file to container '$zipFileContainerName'..." - try { - $null = Set-AzContext -SubscriptionId $storageAccountSubscription -ErrorAction Stop - $null = Deploy-StorageContainer -Name $zipFileContainerName -StorageAccount $BlobStorageAccount - $null = Set-AzStorageBlobContent -Container $zipFileContainerName -Context $BlobStorageAccount.Context -File $zipFilePath -Blob $zipFileName -Force -ErrorAction Stop - Add-LogMessage -Level Success "Successfully uploaded zip file to '$zipFileContainerName'" - } catch { - $null = Remove-Item -Path $zipFileDir -Recurse -Force -ErrorAction SilentlyContinue - $null = Remove-AzStorageContainer -Name $zipFileContainerName -Context $BlobStorageAccount.Context -Force -ErrorAction SilentlyContinue - Add-LogMessage -Level Fatal "Failed to upload zip file to '$zipFileContainerName'! Is your current IP address in the set of permitted deploymentIpAddresses?" -Exception $_.Exception - } finally { - $null = Set-AzContext -Context $originalContext -ErrorAction Stop - } - - # Remove zip file directory - Add-LogMessage -Level Info "[ ] Cleaning up directory $zipFileDir..." - try { - $null = Remove-Item -Path $zipFileDir -Recurse -Force -ErrorAction Stop - Add-LogMessage -Level Success "Successfully cleaned up '$zipFileDir'" - } catch { - Add-LogMessage -Level Fatal "Failed to clean up '$zipFileDir'!" - } - - # Generate a SAS token and construct URL - Add-LogMessage -Level Info "[ ] Generating download URL..."
- $sasToken = New-ReadOnlyStorageAccountSasToken -AccountName $BlobStorageAccount.StorageAccountName -ResourceGroup $BlobStorageAccount.ResourceGroupName -SubscriptionName $storageAccountSubscription - $remoteUrl = "$($BlobStorageAccount.PrimaryEndpoints.Blob)${zipFileContainerName}/${zipFileName}${sasToken}" - Add-LogMessage -Level Success "Constructed download URL for storage account $($BlobStorageAccount.StorageAccountName)." - - # Download the zip file onto the remote machine using curl - $script = @("#!/bin/bash", - "tmpdir=`$(mktemp -d)", - "curl -X GET -o `$tmpdir/${zipFileName} '${remoteUrl}' 2>&1", - "mkdir -p ${RemoteDirectory}", - "rm -rf ${RemoteDirectory}/*", - "unzip `$tmpdir/${zipFileName} -d ${RemoteDirectory}", - "rm -rf `$tmpdir") -join "`n" - Add-LogMessage -Level Info "[ ] Downloading zip file onto $VMName" - $null = Invoke-RemoteScript -Shell "UnixShell" -Script $script -VMName $VMName -ResourceGroupName $VMResourceGroupName - - # Remove blob storage container - Add-LogMessage -Level Info "[ ] Cleaning up storage container '$zipFileContainerName'..." - try { - $null = Set-AzContext -SubscriptionId $storageAccountSubscription -ErrorAction Stop - $null = Remove-AzStorageContainer -Name $zipFileContainerName -Context $BlobStorageAccount.Context -Force -ErrorAction Stop - Add-LogMessage -Level Success "Successfully cleaned up '$zipFileContainerName'" - } catch { - Add-LogMessage -Level Failure "Failed to clean up '$zipFileContainerName'! Is your current IP address in the set of permitted deploymentIpAddresses?" -Exception $_.Exception - } finally { - $null = Set-AzContext -Context $originalContext -ErrorAction Stop - } -} -Export-ModuleMember -Function Send-FilesToLinuxVM - - -# Set the quota on an NFS storage share
# ------------------------------------- -function Set-StorageNfsShareQuota { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of the storage share to update")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Size of quota in GB")] - [int]$Quota, - [Parameter(Mandatory = $true, HelpMessage = "Storage account containing the share")] - [Microsoft.Azure.Commands.Management.Storage.Models.PSStorageAccount]$StorageAccount - ) - Add-LogMessage -Level Info "Setting storage quota for share '$Name' to $Quota GB..." - try { - $null = Set-AzStorageShareQuota -ShareName $Name -Quota $Quota -Context $StorageAccount.Context -ErrorAction Stop - $finalQuota = (Get-AzStorageShare -Name $Name -Context $StorageAccount.Context).Quota - if ($finalQuota -eq $Quota) { - Add-LogMessage -Level Success "Set storage quota for share '$Name' to $Quota GB" - } else { - Add-LogMessage -Level Failure "Failed to update storage quota for share '$Name'! Current quota is $finalQuota GB" - } - } catch { - Add-LogMessage -Level Failure "Failed to update storage share '$Name'! Is your current IP address in the set of permitted dataAdminIpAddresses?"
-Exception $_.Exception - } -} -Export-ModuleMember -Function Set-StorageNfsShareQuota - - -# Create an Azure blob from a URI -# ------------------------------- -function Set-AzureStorageBlobFromUri { - param( - [Parameter(Mandatory = $true, HelpMessage = "URI to file to copy")] - [string]$FileUri, - [Parameter(Mandatory = $false, HelpMessage = "Filename to upload to")] - [string]$BlobFilename, - [Parameter(Mandatory = $true, HelpMessage = "Name of the destination container")] - [string]$StorageContainer, - [Parameter(Mandatory = $true, HelpMessage = "Storage context for the destination storage account")] - [Microsoft.Azure.Commands.Common.Authentication.Abstractions.IStorageContext]$StorageContext - ) - # Note that Start-AzStorageBlobCopy exists but is asynchronous and can stall - if (-not $BlobFilename) { - $BlobFilename = $FileUri -split "/" | Select-Object -Last 1 - } - $tempFolder = New-Item -Type Directory -Path (Join-Path ([System.IO.Path]::GetTempPath()) (New-Guid)) - # Suppress progress bars - $ProgressPreferenceOld = $ProgressPreference - $ProgressPreference = "SilentlyContinue" - $null = Invoke-WebRequest -Uri $FileUri -OutFile (Join-Path $tempFolder $BlobFilename) - $null = Set-AzStorageBlobContent -Container $StorageContainer -Context $StorageContext -File (Join-Path $tempFolder $BlobFilename) -Force - $ProgressPreference = $ProgressPreferenceOld - # Remove temporary directory - Remove-Item -Recurse $tempFolder -} -Export-ModuleMember -Function Set-AzureStorageBlobFromUri diff --git a/deployment/common/Configuration.psm1 b/deployment/common/Configuration.psm1 deleted file mode 100644 index a3145438ee..0000000000 --- a/deployment/common/Configuration.psm1 +++ /dev/null @@ -1,1153 +0,0 @@ -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.RecoveryServices -ErrorAction Stop # Note that this contains TimeZoneConverter -Import-Module Az.Resources -ErrorAction Stop -Import-Module $PSScriptRoot/AzureNetwork -ErrorAction Stop -Import-Module $PSScriptRoot/Cryptography -ErrorAction Stop -Import-Module $PSScriptRoot/DataStructures -ErrorAction Stop -Import-Module $PSScriptRoot/Logging -ErrorAction Stop - - -# Get root directory for configuration files -# ------------------------------------------ -function Get-ConfigRootDir { - try { - return Join-Path (Get-Item $PSScriptRoot).Parent.Parent.FullName "environment_configs" -Resolve -ErrorAction Stop - } catch [System.Management.Automation.ItemNotFoundException] { - Add-LogMessage -Level Fatal "Could not find the configuration file root directory!" - } -} - - -# Load minimal management config parameters from JSON config file into a hashtable -# -------------------------------------------------------------------------------- -function Get-CoreConfig { - param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $false, HelpMessage = "Enter SRE ID (e.g. 
'sandbox')")] - [string]$sreId = $null - ) - # Construct filename for this config file - if ($sreId) { - $configFilename = "sre_${shmId}${sreId}_core_config.json" - } else { - $configFilename = "shm_${shmId}_core_config.json" - } - # Try to load the file - try { - $configPath = Join-Path $(Get-ConfigRootDir) $configFilename -Resolve -ErrorAction Stop - $configJson = Get-Content -Path $configPath -Raw -ErrorAction Stop | ConvertFrom-Json -AsHashtable -ErrorAction Stop - } catch [System.Management.Automation.ItemNotFoundException] { - Add-LogMessage -Level Fatal "Could not find a config file named '$configFilename'..." - } catch [System.ArgumentException] { - Add-LogMessage -Level Fatal "'$configPath' is not a valid JSON config file..." - } - # Ensure that naming structure is being adhered to - if ($sreId -and ($sreId -ne $configJson.sreId)) { - Add-LogMessage -Level Fatal "Config file '$configFilename' has an incorrect SRE ID: $($configJson.sreId)!" - } - if ($shmId -ne $configJson.shmId) { - Add-LogMessage -Level Fatal "Config file '$configFilename' has an incorrect SHM ID: $($configJson.shmId)!" - } - return $configJson -} - - -# Get SHM configuration -# --------------------- -function Get-ShmConfig { - param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - $shmId - ) - # Import minimal management config parameters from JSON config file - we can derive the rest from these - $shmConfigBase = Get-CoreConfig -shmId $shmId - - # Ensure the name in the config is < 27 characters excluding spaces - if ($shmConfigBase.name.Replace(" ", "").Length -gt 27) { - Add-LogMessage -Level Fatal "The 'name' entry in the core SHM config must have fewer than 27 characters (excluding spaces)." - } - - # Safe Haven management config - # ---------------------------- - $shm = [ordered]@{ - azureAdminGroupName = $shmConfigBase.azure.adminGroupName - azureAdTenantId = $shmConfigBase.azure.activeDirectoryTenantId - id = $shmConfigBase.shmId - location = $shmConfigBase.azure.location - name = $shmConfigBase.name - organisation = $shmConfigBase.organisation - rgPrefix = $shmConfigBase.overrides.rgPrefix ? $shmConfigBase.overrides.rgPrefix : "RG_SHM_$($shmConfigBase.shmId)".ToUpper() - nsgPrefix = $shmConfigBase.overrides.nsgPrefix ? $shmConfigBase.overrides.nsgPrefix : "NSG_SHM_$($shmConfigBase.shmId)".ToUpper() - subscriptionName = $shmConfigBase.azure.subscriptionName - vmImagesRgPrefix = $shmConfigBase.vmImages.rgPrefix ? $shmConfigBase.vmImages.rgPrefix : "RG_VMIMAGES" - storageTypeDefault = "Standard_GRS" - diskTypeDefault = "Standard_LRS" - dockerAccount = $shmConfigBase.docker.account ? $shmConfigBase.docker.account : "NA" - dockerPassword = $shmConfigBase.docker.password ? $shmConfigBase.docker.password : "NA" - } - # For normal usage this does not need to be user-configurable. - # However, if you are migrating an existing SHM you will need to ensure that the address spaces of the SHMs do not overlap - $shmIpPrefix = $shmConfigBase.overrides.ipPrefix ? $shmConfigBase.overrides.ipPrefix : "10.0.0" - - # Set timezone and NTP configuration - # Google is one of the few NTP services to provide an exhaustive, stable list of IP addresses. - # However, note that the Google NTP servers are incompatible with others due to leap-second smearing - # -------------------------------------------------------------------------------------------------- - $timezoneLinux = $shmConfigBase.timezone ? 
$shmConfigBase.timezone : "Europe/London" - $shm.time = [ordered]@{ - timezone = [ordered]@{ - linux = $timezoneLinux - windows = [TimeZoneConverter.TZConvert]::IanaToWindows($timezoneLinux) - } - ntp = [ordered]@{ - serverAddresses = @("216.239.35.0", "216.239.35.4", "216.239.35.8", "216.239.35.12") - serverFqdns = @("time.google.com", "time1.google.com", "time2.google.com", "time3.google.com", "time4.google.com") - } - } - - # SRD build images - # ---------------- - $vmImagesSubscriptionName = $shmConfigBase.vmImages.subscriptionName ? $shmConfigBase.vmImages.subscriptionName : $shm.subscriptionName - $vmImagesLocation = $shmConfigBase.vmImages.location ? $shmConfigBase.vmImages.location : $shm.location - # Since an ImageGallery cannot be moved once created, we must ensure that the location parameter matches any gallery that already exists - $originalContext = Get-AzContext - if ($originalContext) { - $null = Set-AzContext -SubscriptionId $vmImagesSubscriptionName -ErrorAction Stop - $locations = Get-AzResource | Where-Object { $_.ResourceGroupName -like "$($shm.vmImagesRgPrefix)_*" } | ForEach-Object { $_.Location } | Sort-Object | Get-Unique - if ($locations.Count -gt 1) { - Add-LogMessage -Level Fatal "Image building resources found in multiple locations: ${locations}!" - } elseif ($locations.Count -eq 1) { - if ($vmImagesLocation -ne $locations) { - Add-LogMessage -Level Fatal "Image building location ($vmImagesLocation) must be set to ${locations}!" - } - } - $null = Set-AzContext -Context $originalContext -ErrorAction Stop - } else { - Add-LogMessage -Level Warning "Skipping check for image building location as you are not logged in to Azure! Run Connect-AzAccount to log in." - } - # Construct build images config - $srdImageStorageSuffix = New-RandomLetters -SeedPhrase $vmImagesSubscriptionName - $shm.srdImage = [ordered]@{ - subscription = $vmImagesSubscriptionName - location = $vmImagesLocation - bootdiagnostics = [ordered]@{ - rg = "$($shm.vmImagesRgPrefix)_BOOT_DIAGNOSTICS" - accountName = "vmimagesbootdiag${srdImageStorageSuffix}".ToLower() | Limit-StringLength -MaximumLength 24 -Silent - } - build = [ordered]@{ - rg = "$($shm.vmImagesRgPrefix)_BUILD_CANDIDATES" - nsg = [ordered]@{ - name = "NSG_VMIMAGES_BUILD_CANDIDATES" - allowedIpAddresses = $shmConfigbase.vmImages.buildIpAddresses ? 
@($shmConfigbase.vmImages.buildIpAddresses) : @("193.60.220.240", "193.60.220.253") - rules = "vmimages-nsg-rules-build-candidates.json" - } - vnet = [ordered]@{ - name = "VNET_VMIMAGES" - cidr = "10.48.0.0/16" - } - subnet = [ordered]@{ - name = "BuildCandidatesSubnet" - cidr = "10.48.0.0/24" - } - # Installation of R packages (and some Python builds) is parallelisable - # We want a compute-optimised VM, since per-core performance is the bottleneck - # Standard_E2_v3 => 2 cores; 16GB RAM; 2.3 GHz; £0.1163/hr - # Standard_F4s_v2 => 4 cores; 8GB RAM; 3.7 GHz; £0.1506/hr - # Standard_D4_v3 => 4 cores; 16GB RAM; 2.4 GHz; £0.1730/hr - # Standard_E4_v3 => 4 cores; 32GB RAM; 2.3 GHz; £0.2326/hr - # Standard_F8s_v2 => 8 cores; 16GB RAM; 3.7 GHz; £0.3012/hr - # Standard_H8 => 8 cores; 56GB RAM; 3.6 GHz; £0.4271/hr - # Standard_E8_v3 => 8 cores; 64GB RAM; 2.3 GHz; £0.4651/hr - vm = [ordered]@{ - diskSizeGb = 128 - diskType = $shm.diskTypeDefault - size = "Standard_F8s_v2" - } - } - gallery = [ordered]@{ - rg = "$($shm.vmImagesRgPrefix)_GALLERY" - name = "DATA_SAFE_HAVEN_SHARED_IMAGES" - } - images = [ordered]@{ - rg = "$($shm.vmImagesRgPrefix)_STORAGE" - } - keyVault = [ordered]@{ - rg = "$($shm.vmImagesRgPrefix)_SECRETS" - name = "kv-shm-$($shm.id)-images".ToLower() | Limit-StringLength -MaximumLength 24 - } - network = [ordered]@{ - rg = "$($shm.vmImagesRgPrefix)_NETWORKING" - } - } - - # Domain config - # ------------- - $shm.domain = [ordered]@{ - fqdn = $shmConfigBase.domain - netbiosName = ($shmConfigBase.netbiosName ? $shmConfigBase.netbiosName : $shm.id).ToUpper() | Limit-StringLength -MaximumLength 15 -FailureIsFatal - dn = "DC=$(($shmConfigBase.domain).Replace('.',',DC='))" - ous = [ordered]@{ - databaseServers = [ordered]@{ name = "Secure Research Environment Database Servers" } - linuxServers = [ordered]@{ name = "Secure Research Environment Linux Servers" } - researchUsers = [ordered]@{ name = "Safe Haven Research Users" } - securityGroups = [ordered]@{ name = "Safe Haven Security Groups" } - serviceAccounts = [ordered]@{ name = "Safe Haven Service Accounts" } - identityServers = [ordered]@{ name = "Safe Haven Identity Servers" } - } - } - $shm.domain.fqdnLower = ($shm.domain.fqdn).ToLower() - $shm.domain.fqdnUpper = ($shm.domain.fqdn).ToUpper() - foreach ($ouName in $shm.domain.ous.Keys) { - $shm.domain.ous[$ouName].path = "OU=$($shm.domain.ous[$ouName].name),$($shm.domain.dn)" - } - # Security groups - $shm.domain.securityGroups = [ordered]@{ - computerManagers = [ordered]@{ name = "SG Safe Haven Computer Management Users" } - serverAdmins = [ordered]@{ name = "SG Safe Haven Server Administrators" } - } - foreach ($groupName in $shm.domain.securityGroups.Keys) { - $shm.domain.securityGroups[$groupName].description = $shm.domain.securityGroups[$groupName].name - } - - # Network config - # -------------- - # Deconstruct base address prefix to allow easy construction of IP based parameters - $shmPrefixOctets = $shmIpPrefix.Split(".") - $shmBasePrefix = "$($shmPrefixOctets[0]).$($shmPrefixOctets[1])" - $shmThirdOctet = ([int]$shmPrefixOctets[2]) - $shm.network = [ordered]@{ - vnet = [ordered]@{ - name = "VNET_SHM_$($shm.id)".ToUpper() - cidr = "${shmBasePrefix}.${shmThirdOctet}.0/21" - rg = "$($shm.rgPrefix)_NETWORKING".ToUpper() - subnets = [ordered]@{ - identity = [ordered]@{ - name = "IdentitySubnet" - cidr = "${shmBasePrefix}.${shmThirdOctet}.0/24" - nsg = [ordered]@{ - name = "$($shm.nsgPrefix)_IDENTITY".ToUpper() - rules = "shm-nsg-rules-identity.json" - } - } - monitoring = 
[ordered]@{ - name = "MonitoringSubnet" - cidr = "${shmBasePrefix}.$([int]$shmThirdOctet + 1).0/24" - nsg = [ordered]@{ - name = "$($shm.nsgPrefix)_MONITORING".ToUpper() - rules = "shm-nsg-rules-monitoring.json" - } - } - firewall = [ordered]@{ - # NB. The firewall subnet MUST be named 'AzureFirewallSubnet'. See https://docs.microsoft.com/en-us/azure/firewall/tutorial-firewall-deploy-portal - name = "AzureFirewallSubnet" - cidr = "${shmBasePrefix}.$([int]$shmThirdOctet + 2).0/24" - } - updateServers = [ordered]@{ - name = "UpdateServersSubnet" - cidr = "${shmBasePrefix}.$([int]$shmThirdOctet + 3).0/24" - nsg = [ordered]@{ - name = "$($shm.nsgPrefix)_UPDATE_SERVERS".ToUpper() - rules = "shm-nsg-rules-update-servers.json" - } - } - gateway = [ordered]@{ - # NB. The Gateway subnet MUST be named 'GatewaySubnet'. See https://docs.microsoft.com/en-us/azure/vpn-gateway/vpn-gateway-vpn-faq#do-i-need-a-gatewaysubnet - name = "GatewaySubnet" - cidr = "${shmBasePrefix}.$([int]$shmThirdOctet + 7).0/24" - } - } - } - vpn = [ordered]@{ - cidr = "172.16.201.0/24" # NB. this must not overlap with the VNet that the VPN gateway is part of - } - } - foreach ($tier in @(2, 3)) { - $shmRepositoryPrefix = "10.10.${tier}" - $shm.network["vnetRepositoriesTier${tier}"] = [ordered]@{ - name = "VNET_SHM_$($shm.id)_PACKAGE_REPOSITORIES_TIER_${tier}".ToUpper() - cidr = "${shmRepositoryPrefix}.0/24" - rg = $shm.network.vnet.rg - subnets = [ordered]@{ - deployment = [ordered]@{ - name = "RepositoryDeploymentSubnet" - cidr = "${shmRepositoryPrefix}.0/26" - nsg = [ordered]@{ - name = "$($shm.nsgPrefix)_REPOSITORY_DEPLOYMENT_TIER_${tier}".ToUpper() - rules = "shm-nsg-rules-repository-deployment-tier${tier}.json" - } - } - mirrorsExternal = [ordered]@{ - name = "RepositoryMirrorsExternalTier${tier}Subnet" - cidr = "${shmRepositoryPrefix}.64/26" - nsg = [ordered]@{ - name = "$($shm.nsgPrefix)_REPOSITORY_MIRRORS_EXTERNAL_TIER${tier}".ToUpper() - rules = "shm-nsg-rules-repository-mirrors-external-tier${tier}.json" - } - } - mirrorsInternal = [ordered]@{ - name = "RepositoryMirrorsInternalTier${tier}Subnet" - cidr = "${shmRepositoryPrefix}.128/26" - nsg = [ordered]@{ - name = "$($shm.nsgPrefix)_REPOSITORY_MIRRORS_INTERNAL_TIER${tier}".ToUpper() - rules = "shm-nsg-rules-repository-mirrors-internal-tier${tier}.json" - } - } - proxies = [ordered]@{ - name = "RepositoryProxiesTier${tier}Subnet" - cidr = "${shmRepositoryPrefix}.192/26" - nsg = [ordered]@{ - name = "$($shm.nsgPrefix)_REPOSITORY_PROXIES_TIER_${tier}".ToUpper() - rules = "shm-nsg-rules-repository-proxies-tier${tier}.json" - } - } - } - } - } - - # Monitoring config - # ----------------- - # Cloudflare IP public addresses from https://www.cloudflare.com/ips/ - $cloudFlareIpAddresses = @("103.21.244.0/22", "103.22.200.0/22", "103.31.4.0/22", "104.16.0.0/13", "104.24.0.0/14", "108.162.192.0/18", "131.0.72.0/22", "141.101.64.0/18", "162.158.0.0/15", "172.64.0.0/13", "173.245.48.0/20", "188.114.96.0/20", "190.93.240.0/20", "197.234.240.0/22", "198.41.128.0/17") - # Cloudfront IP public addresses from https://d7uri8nf7uskq.cloudfront.net/tools/list-cloudfront-ips - $cloudFrontIpAddresses = @("3.10.17.128/25", "3.101.158.0/23", "3.11.53.0/24", "3.128.93.0/24", "3.134.215.0/24", "3.231.2.0/25", "3.234.232.224/27", "3.236.169.192/26", "3.236.48.0/23", "3.35.130.128/25", "13.113.196.64/26", "13.113.203.0/24", "13.124.199.0/24", "13.210.67.128/26", "13.224.0.0/14", "13.228.69.0/24", "13.233.177.192/26", "13.249.0.0/16", "13.32.0.0/15", "13.35.0.0/16", "13.48.32.0/24", 
"13.54.63.128/26", "13.59.250.0/26", "15.158.0.0/16", "15.188.184.0/24", "15.207.13.128/25", "15.207.213.128/25", "18.154.0.0/15", "18.160.0.0/15", "18.164.0.0/15", "18.172.0.0/15", "18.192.142.0/23", "18.200.212.0/23", "18.216.170.128/25", "18.229.220.192/26", "18.238.0.0/15", "18.244.0.0/15", "18.64.0.0/14", "18.68.0.0/16", "34.195.252.0/24", "34.216.51.0/25", "34.223.12.224/27", "34.223.80.192/26", "34.226.14.0/24", "35.158.136.0/24", "35.162.63.192/26", "35.167.191.128/26", "36.103.232.0/25", "36.103.232.128/26", "44.227.178.0/24", "44.234.108.128/25", "44.234.90.252/30", "52.124.128.0/17", "52.15.127.128/26", "52.199.127.192/26", "52.212.248.0/26", "52.220.191.0/26", "52.222.128.0/17", "52.46.0.0/18", "52.47.139.0/24", "52.52.191.128/26", "52.56.127.0/25", "52.57.254.0/24", "52.66.194.128/26", "52.78.247.128/26", "52.82.128.0/19", "52.84.0.0/15", "54.182.0.0/16", "54.192.0.0/16", "54.230.0.0/17", "54.230.128.0/18", "54.230.200.0/21", "54.230.208.0/20", "54.230.224.0/19", "54.233.255.128/26", "54.239.128.0/18", "54.239.192.0/19", "54.240.128.0/18", "58.254.138.0/25", "58.254.138.128/26", "64.252.128.0/18", "64.252.64.0/18", "65.8.0.0/16", "65.9.0.0/17", "65.9.128.0/18", "70.132.0.0/18", "71.152.0.0/17", "99.79.169.0/24", "99.84.0.0/16", "99.86.0.0/16", "108.138.0.0/15", "108.156.0.0/14", "116.129.226.0/25", "116.129.226.128/26", "118.193.97.128/25", "118.193.97.64/26", "119.147.182.0/25", "119.147.182.128/26", "120.232.236.0/25", "120.232.236.128/26", "120.253.240.192/26", "120.253.241.160/27", "120.253.245.128/26", "120.253.245.192/27", "120.52.12.64/26", "120.52.153.192/26", "120.52.22.96/27", "120.52.39.128/27", "130.176.0.0/17", "130.176.128.0/18", "130.176.192.0/19", "130.176.224.0/20", "143.204.0.0/16", "144.220.0.0/16", "180.163.57.0/25", "180.163.57.128/26", "204.246.164.0/22", "204.246.168.0/22", "204.246.172.0/24", "204.246.173.0/24", "204.246.174.0/23", "204.246.176.0/20", "205.251.200.0/21", "205.251.208.0/20", "205.251.249.0/24", "205.251.250.0/23", "205.251.252.0/23", "205.251.254.0/24", "216.137.32.0/19", "223.71.11.0/27", "223.71.71.128/25", "223.71.71.96/27") - # All Microsoft public IP addresses from https://www.microsoft.com/en-us/download/confirmation.aspx?id=53602 - $microsoftIpAddresses = @("4.128.0.0/12", "4.144.0.0/12", "4.160.0.0/12", "4.176.0.0/12", "4.192.0.0/12", "4.208.0.0/12", "4.224.0.0/12", "4.240.0.0/12", "13.64.0.0/11", "13.96.0.0/13", "13.104.0.0/14", "20.0.0.0/11", "20.33.0.0/16", "20.34.0.0/15", "20.36.0.0/14", "20.40.0.0/13", "20.48.0.0/12", "20.64.0.0/10", "20.128.0.0/16", "20.130.0.0/16", "20.135.0.0/16", "20.136.0.0/16", "20.140.0.0/15", "20.143.0.0/16", "20.144.0.0/14", "20.150.0.0/15", "20.152.0.0/16", "20.153.0.0/16", "20.157.0.0/16", "20.158.0.0/15", "20.160.0.0/12", "20.176.0.0/14", "20.180.0.0/14", "20.184.0.0/13", "20.192.0.0/10", "23.96.0.0/13", "40.64.0.0/10", "40.162.0.0/16", "42.159.0.0/16", "51.4.0.0/15", "51.8.0.0/16", "51.10.0.0/15", "51.12.0.0/15", "51.18.0.0/16", "51.51.0.0/16", "51.53.0.0/16", "51.103.0.0/16", "51.104.0.0/15", "51.107.0.0/16", "51.116.0.0/16", "51.120.0.0/16", "51.124.0.0/16", "51.132.0.0/16", "51.136.0.0/15", "51.138.0.0/16", "51.140.0.0/14", "51.144.0.0/15", "52.96.0.0/12", "52.112.0.0/14", "52.120.0.0/14", "52.125.0.0/16", "52.126.0.0/15", "52.130.0.0/15", "52.132.0.0/14", "52.136.0.0/13", "52.145.0.0/16", "52.146.0.0/15", "52.148.0.0/14", "52.152.0.0/13", "52.160.0.0/11", "52.224.0.0/11", "64.4.0.0/18", "65.52.0.0/14", "66.119.144.0/20", "68.18.0.0/15", "68.154.0.0/15", "68.210.0.0/15", "68.218.0.0/15", 
"68.220.0.0/15", "70.37.0.0/17", "70.37.128.0/18", "70.152.0.0/15", "70.156.0.0/15", "72.144.0.0/14", "72.152.0.0/14", "74.160.0.0/14", "74.176.0.0/14", "74.224.0.0/14", "74.234.0.0/15", "74.240.0.0/14", "74.248.0.0/15", "91.190.216.0/21", "94.245.64.0/18", "98.64.0.0/14", "98.70.0.0/15", "102.37.0.0/16", "102.133.0.0/16", "103.9.8.0/22", "103.25.156.0/24", "103.25.157.0/24", "103.25.158.0/23", "103.36.96.0/22", "103.255.140.0/22", "104.40.0.0/13", "104.146.0.0/15", "104.208.0.0/13", "108.140.0.0/14", "111.221.16.0/20", "111.221.64.0/18", "128.94.0.0/16", "129.75.0.0/16", "131.107.0.0/16", "131.253.1.0/24", "131.253.3.0/24", "131.253.5.0/24", "131.253.6.0/24", "131.253.8.0/24", "131.253.12.0/22", "131.253.16.0/23", "131.253.18.0/24", "131.253.21.0/24", "131.253.22.0/23", "131.253.24.0/21", "131.253.32.0/20", "131.253.61.0/24", "131.253.62.0/23", "131.253.64.0/18", "131.253.128.0/17", "132.164.0.0/16", "132.245.0.0/16", "134.170.0.0/16", "134.177.0.0/16", "135.130.0.0/16", "135.149.0.0/16", "137.116.0.0/15", "137.135.0.0/16", "138.91.0.0/16", "138.105.0.0/16", "138.196.0.0/16", "138.239.0.0/16", "139.217.0.0/16", "139.219.0.0/16", "141.251.0.0/16", "143.64.0.0/16", "146.147.0.0/16", "147.145.0.0/16", "147.243.0.0/16", "148.7.0.0/16", "150.171.0.0/16", "150.242.48.0/22", "155.62.0.0/16", "157.31.0.0/16", "157.54.0.0/15", "157.56.0.0/14", "157.60.0.0/16", "158.23.0.0/16", "158.158.0.0/16", "159.27.0.0/16", "159.128.0.0/16", "163.228.0.0/16", "167.105.0.0/16", "167.220.0.0/16", "168.61.0.0/16", "168.62.0.0/15", "169.138.0.0/16", "170.165.0.0/16", "172.160.0.0/11", "172.200.0.0/13", "172.208.0.0/13", "191.232.0.0/13", "192.32.0.0/16", "192.48.225.0/24", "192.84.159.0/24", "192.84.160.0/23", "192.197.157.0/24", "192.237.67.0/24", "193.149.64.0/19", "193.221.113.0/24", "194.69.96.0/19", "194.110.197.0/24", "195.134.224.0/19", "198.105.232.0/22", "198.137.97.0/24", "198.180.95.0/24", "198.180.96.0/23", "198.200.130.0/24", "198.206.164.0/24", "199.30.16.0/20", "199.60.28.0/24", "199.74.210.0/24", "199.103.90.0/23", "199.103.122.0/24", "199.242.32.0/20", "199.242.48.0/21", "202.89.224.0/20", "204.13.120.0/21", "204.14.180.0/22", "204.79.135.0/24", "204.79.179.0/24", "204.79.181.0/24", "204.79.188.0/24", "204.79.195.0/24", "204.79.196.0/23", "204.79.252.0/24", "204.152.18.0/23", "204.152.140.0/23", "204.231.192.0/24", "204.231.194.0/23", "204.231.197.0/24", "204.231.198.0/23", "204.231.200.0/21", "204.231.208.0/20", "204.231.236.0/24", "205.174.224.0/20", "206.138.168.0/21", "206.191.224.0/19", "207.46.0.0/16", "207.68.128.0/18", "208.68.136.0/21", "208.76.44.0/22", "208.84.0.0/21", "209.240.192.0/19", "213.199.128.0/18", "216.32.180.0/22", "216.220.208.0/20") - $linuxUpdateServerHostname = "LINUX-UPDATES-SHM-$($shm.id)".ToUpper() - $shm.monitoring = [ordered]@{ - rg = "$($shm.rgPrefix)_MONITORING".ToUpper() - automationAccount = [ordered]@{ - name = "shm-$($shm.id)-automation".ToLower() - } - loggingWorkspace = [ordered]@{ - name = "shm-$($shm.id)-loganalytics".ToLower() - } - privatelink = [ordered]@{ - name = "shm-$($shm.id)-privatelinkscope".ToLower() - } - updateServers = [ordered]@{ - externalIpAddresses = [ordered]@{ - linux = ( - @("72.32.157.246", "87.238.57.227", "147.75.85.69", "217.196.149.55") + # apt.postgresql.org - @("91.189.88.0/21", "185.125.188.0/22") + # archive.ubuntu.com, changelogs.ubuntu.com, security.ubuntu.com - $cloudFlareIpAddresses + # database.clamav.net, packages.gitlab.com and qgis.org use Cloudflare - $cloudFrontIpAddresses + # packages.gitlab.com uses Cloudfront to 
host its Release file - @("104.131.190.124") + # dbeaver.io - @("152.199.20.126") + # developer.download.nvidia.com - $microsoftIpAddresses # packages.microsoft.com, azure.archive.ubuntu.com - ) - windows = @($microsoftIpAddresses) # for several Microsoft-owned endpoints - } - linux = [ordered]@{ - adminPasswordSecretName = "shm-$($shm.id)-vm-admin-password-linux-update-server".ToLower() - disks = [ordered]@{ - os = [ordered]@{ - sizeGb = "64" - type = $shm.diskTypeDefault - } - } - hostname = $linuxUpdateServerHostname - ip = Get-NextAvailableIpInRange -IpRangeCidr $shm.network.vnet.subnets.updateServers.cidr -Offset 4 - vmName = $linuxUpdateServerHostname - vmSize = "Standard_B2ms" - } - schedule = [ordered]@{ - daily_definition_updates = [ordered]@{ - hour = "01" - minute = "01" - } - weekly_system_updates = [ordered]@{ - day = "Tuesday" - hour = "02" - minute = "02" - } - } - } - } - - # Firewall config - # --------------- - $shm.firewall = [ordered]@{ - name = "FIREWALL-SHM-$($shm.id)".ToUpper() - routeTableName = "ROUTE-TABLE-SHM-$($shm.id)".ToUpper() - } - - # Secrets config - # -------------- - $shm.keyVault = [ordered]@{ - rg = "$($shm.rgPrefix)_SECRETS".ToUpper() - name = "kv-shm-$($shm.id)".ToLower() | Limit-StringLength -MaximumLength 24 - secretNames = [ordered]@{ - aadEmergencyAdminUsername = "shm-$($shm.id)-aad-emergency-admin-username".ToLower() - aadEmergencyAdminPassword = "shm-$($shm.id)-aad-emergency-admin-password".ToLower() - buildImageAdminUsername = "shm-$($shm.id)-buildimage-admin-username".ToLower() - buildImageAdminPassword = "shm-$($shm.id)-buildimage-admin-password".ToLower() - domainAdminUsername = "shm-$($shm.id)-domain-admin-username".ToLower() - domainAdminPassword = "shm-$($shm.id)-domain-admin-password".ToLower() - vmAdminUsername = "shm-$($shm.id)-vm-admin-username".ToLower() - vpnCaCertificate = "shm-$($shm.id)-vpn-ca-cert".ToLower() - vpnCaCertificatePlain = "shm-$($shm.id)-vpn-ca-cert-plain".ToLower() - vpnCaCertPassword = "shm-$($shm.id)-vpn-ca-cert-password".ToLower() - vpnClientCertificate = "shm-$($shm.id)-vpn-client-cert".ToLower() - vpnClientCertPassword = "shm-$($shm.id)-vpn-client-cert-password".ToLower() - } - } - - # SHM users - # --------- - $shm.users = [ordered]@{ - computerManagers = [ordered]@{ - databaseServers = [ordered]@{ - name = "$($shm.domain.netbiosName) Database Servers Manager" - samAccountName = "$($shm.id)databasesrvrs".ToLower() | Limit-StringLength -MaximumLength 20 - passwordSecretName = "shm-$($shm.id)-computer-manager-password-database-servers".ToLower() - } - identityServers = [ordered]@{ - name = "$($shm.domain.netbiosName) Identity Servers Manager" - samAccountName = "$($shm.id)identitysrvrs".ToLower() | Limit-StringLength -MaximumLength 20 - passwordSecretName = "shm-$($shm.id)-computer-manager-password-identity-servers".ToLower() - } - linuxServers = [ordered]@{ - name = "$($shm.domain.netbiosName) Linux Servers Manager" - samAccountName = "$($shm.id)linuxsrvrs".ToLower() | Limit-StringLength -MaximumLength 20 - passwordSecretName = "shm-$($shm.id)-computer-manager-password-linux-servers".ToLower() - } - } - serviceAccounts = [ordered]@{ - aadLocalSync = [ordered]@{ - name = "$($shm.domain.netbiosName) Local AD Sync Administrator" - samAccountName = "$($shm.id)localadsync".ToLower() | Limit-StringLength -MaximumLength 20 - passwordSecretName = "shm-$($shm.id)-aad-localsync-password".ToLower() - usernameSecretName = "shm-$($shm.id)-aad-localsync-username".ToLower() - } - } - } - - # Domain controller config - # 
------------------------ - $hostname = "DC1-SHM-$($shm.id)".ToUpper() | Limit-StringLength -MaximumLength 15 - $shm.dc = [ordered]@{ - rg = "$($shm.rgPrefix)_DC".ToUpper() - vmName = $hostname - vmSize = "Standard_D2s_v3" - hostname = $hostname - hostnameLower = $hostname.ToLower() - hostnameUpper = $hostname.ToUpper() - fqdn = "${hostname}.$($shm.domain.fqdn)".ToLower() - ip = Get-NextAvailableIpInRange -IpRangeCidr $shm.network.vnet.subnets.identity.cidr -Offset 4 - external_dns_resolver = "168.63.129.16" # https://docs.microsoft.com/en-us/azure/virtual-network/what-is-ip-address-168-63-129-16 - installationDirectory = "C:\Installation" - adDirectory = "C:\ActiveDirectory" - safemodePasswordSecretName = "shm-$($shm.id)-vm-safemode-password-dc".ToLower() - disks = [ordered]@{ - os = [ordered]@{ - sizeGb = "128" - type = $shm.diskTypeDefault - } - } - } - - # Backup domain controller config - # ------------------------------- - $hostname = "DC2-SHM-$($shm.id)".ToUpper() | Limit-StringLength -MaximumLength 15 - $shm.dcb = [ordered]@{ - vmName = $hostname - hostname = $hostname - fqdn = "${hostname}.$($shm.domain.fqdn)".ToLower() - ip = Get-NextAvailableIpInRange -IpRangeCidr $shm.network.vnet.subnets.identity.cidr -Offset 5 - } - - # NPS config - # ---------- - $hostname = "NPS-SHM-$($shm.id)".ToUpper() | Limit-StringLength -MaximumLength 15 - $shm.nps = [ordered]@{ - adminPasswordSecretName = "shm-$($shm.id)-vm-admin-password-nps".ToLower() - rg = "$($shm.rgPrefix)_NPS".ToUpper() - vmName = $hostname - vmSize = "Standard_D2s_v3" - hostname = $hostname - ip = Get-NextAvailableIpInRange -IpRangeCidr $shm.network.vnet.subnets.identity.cidr -Offset 6 - installationDirectory = "C:\Installation" - disks = [ordered]@{ - os = [ordered]@{ - sizeGb = "128" - type = $shm.diskTypeDefault - } - } - } - - # Storage config - # -------------- - if ($shm.id.Contains("-")) { - Add-LogMessage -Level Warning "The hyphen character is not allowed in storage account names and will be removed." - } - $shmStoragePrefix = "shm$($shm.id)".Replace("-", "") - $shmStorageSuffix = New-RandomLetters -SeedPhrase "$($shm.subscriptionName)$($shm.id)" - $storageRg = "$($shm.rgPrefix)_STORAGE".ToUpper() - $shm.storage = [ordered]@{ - artifacts = [ordered]@{ - rg = $storageRg - accountName = "${shmStoragePrefix}artifacts${shmStorageSuffix}".ToLower() | Limit-StringLength -MaximumLength 24 -Silent - containers = [ordered]@{ - shmArtifactsDC = "shm-artifacts-dc" - shmArtifactsNPS = "shm-artifacts-nps" - shmDesiredState = "shm-desired-state" - } - } - bootdiagnostics = [ordered]@{ - rg = $storageRg - accountName = "${shmStoragePrefix}bootdiags${shmStorageSuffix}".ToLower() | Limit-StringLength -MaximumLength 24 -Silent - } - persistentdata = [ordered]@{ - rg = "$($shm.rgPrefix)_PERSISTENT_DATA".ToUpper() - } - } - - # DNS config - # ---------- - $shm.dns = [ordered]@{ - subscriptionName = $shmConfigBase.dnsRecords.subscriptionName ? $shmConfigBase.dnsRecords.subscriptionName : $shm.subscriptionName - rg = $shmConfigBase.dnsRecords.resourceGroupName ? 
$shmConfigBase.dnsRecords.resourceGroupName : "$($shm.rgPrefix)_DNS_RECORDS".ToUpper() - } - - # Package repository configuration - # -------------------------------- - $shm.repositories = [ordered]@{ - rg = "$($shm.rgPrefix)_PACKAGE_REPOSITORIES".ToUpper() - } - foreach ($tier in @(2, 3)) { - $shm.repositories["tier${tier}"] = [ordered]@{} - # Tier 2 defaults to using a proxy unless otherwise specified - if ($tier -eq 2) { - $LocalRepositoryTypes = ($shmConfigBase.repositoryType.tier2 -and ($shmConfigBase.repositoryType.tier2.ToLower() -eq "mirror")) ? @("mirrorsExternal", "mirrorsInternal") : @("proxies") - } - # Tier 3 defaults to using a proxy unless otherwise specified - if ($tier -eq 3) { - $LocalRepositoryTypes = ($shmConfigBase.repositoryType.tier3 -and ($shmConfigBase.repositoryType.tier3.ToLower() -eq "mirror")) ? @("mirrorsExternal", "mirrorsInternal") : @("proxies") - } - # Tier 4 requires the use of mirrors - if ($tier -eq 4) { - $LocalRepositoryTypes = @("mirrorsExternal", "mirrorsInternal") - } - foreach ($LocalRepositoryType in $LocalRepositoryTypes) { - $shm.repositories["tier${tier}"][$LocalRepositoryType] = [ordered]@{} - $RemoteRepositories = ($LocalRepositoryType -eq "proxies") ? "many" : @("cran", "pypi") - $LocalRepositoryShort = $LocalRepositoryType.Replace("proxies", "proxy").Replace("mirrors", "mirror-") - foreach ($RemoteRepository in $RemoteRepositories) { - if ($RemoteRepository -eq "cran") { - $dataDiskSizeGb = ($tier -eq 2) ? 128 : 32 - $ipOffset = 4 - } elseif ($RemoteRepository -eq "pypi") { - $dataDiskSizeGb = ($tier -eq 2) ? 8192 : 1024 - $ipOffset = 5 - } else { - $dataDiskSizeGb = $null - $ipOffset = 6 - } - $vmName = "SHM-$($shm.id)-${RemoteRepository}-REPOSITORY-${LocalRepositoryShort}-TIER-${tier}".ToUpper() - $shm.repositories["tier${tier}"][$LocalRepositoryType][$RemoteRepository] = [ordered]@{ - adminPasswordSecretName = "shm-$($shm.id)-vm-admin-password-${RemoteRepository}-repository-${LocalRepositoryShort}-tier-${tier}".ToLower() - disks = [ordered]@{ - os = [ordered]@{ - sizeGb = 32 - type = $shm.diskTypeDefault - } - } - ipAddress = Get-NextAvailableIpInRange -IpRangeCidr $shm.network["vnetRepositoriesTier${tier}"].subnets[$LocalRepositoryType].cidr -Offset $ipOffset - vmName = $vmName - vmSize = "Standard_B2ms" - } - if ($dataDiskSizeGb) { - $shm.repositories["tier${tier}"][$LocalRepositoryType][$RemoteRepository].disks["data"] = [ordered]@{ - sizeGb = $dataDiskSizeGb - type = $shm.diskTypeDefault - } - } - if ($LocalRepositoryType -eq "proxies") { - $shm.repositories["tier${tier}"][$LocalRepositoryType][$RemoteRepository]["applicationAdminPasswordSecretName"] = "shm-$($shm.id)-application-admin-password-${RemoteRepository}-repository-${LocalRepositoryShort}-tier-${tier}".ToLower() - } - } - } - } - - # Apply overrides (if any exist) - # ------------------------------ - if ($shmConfigBase.overrides) { - Copy-HashtableOverrides -Source $shmConfigBase.overrides -Target $shm - } - - return (ConvertTo-SortedHashtable -Sortable $shm) -} -Export-ModuleMember -Function Get-ShmConfig - - -# Get a list of resource groups belonging to a particular SRE -# ----------------------------------------------------------- -function Get-ShmResourceGroups { - param( - [Parameter(Mandatory = $true, HelpMessage = "SRE config")] - [System.Collections.IDictionary]$shmConfig - ) - $originalContext = Get-AzContext - $excludedResourceGroups = Find-AllMatchingKeys -Hashtable $shmConfig.srdImage -Key "rg" - $potentialResourceGroups = Find-AllMatchingKeys -Hashtable 
$shmConfig -Key "rg" | Where-Object { -not $excludedResourceGroups.Contains($_) } - try { - $null = Set-AzContext -SubscriptionId $shmConfig.subscriptionName -ErrorAction Stop - $availableResourceGroups = @(Get-AzResourceGroup | Where-Object { $_.ResourceGroupName -in $potentialResourceGroups }) - } finally { - $null = Set-AzContext -Context $originalContext -ErrorAction Stop - } - return $availableResourceGroups -} -Export-ModuleMember -Function Get-ShmResourceGroups - - -# Add a new SRE configuration -# --------------------------- -function Get-SreConfig { - param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $true, HelpMessage = "Enter SRE ID (e.g. 'sandbox')")] - [string]$sreId - ) - # Import minimal management config parameters from JSON config file - we can derive the rest from these - $sreConfigBase = Get-CoreConfig -shmId $shmId -sreId $sreId - - # Support for "MicrosoftRDS" has been removed. The "remotedDesktopProvider" field now defaults to "ApacheGuacamole" - if ($null -ne $sreConfigBase.remoteDesktopProvider) { - if ($sreConfigBase.remoteDesktopProvider -eq "ApacheGuacamole") { - Add-LogMessage -Level Warning "The remoteDesktopProvider configuration option has been deprecated and will be removed in the future" - } else { - Add-LogMessage -Level Fatal "Support for remote desktops other than ApacheGuacamole has been removed" - } - } - $sreConfigBase.remoteDesktopProvider = "ApacheGuacamole" - - # Secure research environment config - # ---------------------------------- - - # Setup the basic config - $config = [ordered]@{ - shm = Get-ShmConfig -shmId $sreConfigBase.shmId - sre = [ordered]@{ - id = $sreConfigBase.sreId | Limit-StringLength -MaximumLength 7 -FailureIsFatal - rgPrefix = $sreConfigBase.overrides.sre.rgPrefix ? $sreConfigBase.overrides.sre.rgPrefix : "RG_SHM_$($sreConfigBase.shmId)_SRE_$($sreConfigBase.sreId)".ToUpper() - nsgPrefix = $sreConfigBase.overrides.sre.nsgPrefix ? $sreConfigBase.overrides.sre.nsgPrefix : "NSG_SHM_$($sreConfigBase.shmId)_SRE_$($sreConfigBase.sreId)".ToUpper() - shortName = "sre-$($sreConfigBase.sreId)".ToLower() - subscriptionName = $sreConfigBase.subscriptionName - tier = $sreConfigBase.tier - remoteDesktop = [ordered]@{ - provider = $sreConfigBase.remoteDesktopProvider - } - storageTypeDefault = "Standard_GRS" - diskTypeDefault = "Standard_LRS" - } - } - $config.sre.azureAdminGroupName = $sreConfigBase.azureAdminGroupName ? $sreConfigBase.azureAdminGroupName : $config.shm.azureAdminGroupName - $config.sre.location = $config.shm.location - - # Set the default timezone to match the SHM timezone - $config.sre.time = [ordered]@{ - timezone = [ordered]@{ - linux = $config.shm.time.timezone.linux - windows = $config.shm.time.timezone.windows - } - } - - # Ensure that this tier is supported - if (-not @("0", "1", "2", "3").Contains($config.sre.tier)) { - Add-LogMessage -Level Fatal "Tier '$($config.sre.tier)' not supported (NOTE: Tier must be provided as a string in the core SRE config.)" - } - - # Domain config - # ------------- - $sreDomain = $sreConfigBase.domain ? 
$sreConfigBase.domain : "$($config.sre.id).$($config.shm.domain.fqdn)".ToLower() - $config.sre.domain = [ordered]@{ - dn = "DC=$($sreDomain.Replace('.',',DC='))" - fqdn = "$sreDomain".ToLower() - netbiosName = $($config.sre.id).ToUpper() | Limit-StringLength -MaximumLength 15 -FailureIsFatal - } - $config.sre.domain.securityGroups = [ordered]@{ - dataAdministrators = [ordered]@{ name = "SG $($config.sre.domain.netbiosName) Data Administrators" } - systemAdministrators = [ordered]@{ name = "SG $($config.sre.domain.netbiosName) System Administrators" } - researchUsers = [ordered]@{ name = "SG $($config.sre.domain.netbiosName) Research Users" } - } - foreach ($groupName in $config.sre.domain.securityGroups.Keys) { - $config.sre.domain.securityGroups[$groupName].description = $config.sre.domain.securityGroups[$groupName].name - } - - # Network config - # -------------- - # Deconstruct base address prefix to allow easy construction of IP based parameters - $srePrefixOctets = $sreConfigBase.ipPrefix.Split('.') - $sreBasePrefix = "$($srePrefixOctets[0]).$($srePrefixOctets[1])" - $sreThirdOctet = $srePrefixOctets[2] - $config.sre.network = [ordered]@{ - vnet = [ordered]@{ - rg = "$($config.sre.rgPrefix)_NETWORKING".ToUpper() - name = "VNET_SHM_$($config.shm.id)_SRE_$($config.sre.id)".ToUpper() - cidr = "${sreBasePrefix}.${sreThirdOctet}.0/21" - subnets = [ordered]@{ - deployment = [ordered]@{ - name = "DeploymentSubnet" - cidr = "${sreBasePrefix}.$([int]$sreThirdOctet).0/24" - nsg = [ordered]@{ - name = "$($config.sre.nsgPrefix)_DEPLOYMENT".ToUpper() - rules = "sre-nsg-rules-deployment.json" - } - } - remoteDesktop = [ordered]@{ # note that further details are added below - name = "RemoteDesktopSubnet" - cidr = "${sreBasePrefix}.$([int]$sreThirdOctet + 1).0/24" - } - data = [ordered]@{ - name = "PrivateDataSubnet" - cidr = "${sreBasePrefix}.$([int]$sreThirdOctet + 2).0/24" - } - databases = [ordered]@{ - name = "DatabasesSubnet" - cidr = "${sreBasePrefix}.$([int]$sreThirdOctet + 3).0/24" - nsg = [ordered]@{ - name = "$($config.sre.nsgPrefix)_DATABASES".ToUpper() - rules = "sre-nsg-rules-databases.json" - } - } - compute = [ordered]@{ - name = "ComputeSubnet" - cidr = "${sreBasePrefix}.$([int]$sreThirdOctet + 4).0/24" - nsg = [ordered]@{ - name = "$($config.sre.nsgPrefix)_COMPUTE".ToUpper() - rules = "sre-nsg-rules-compute.json" - } - } - webapps = [ordered]@{ - name = "WebappsSubnet" - cidr = "${sreBasePrefix}.$([int]$sreThirdOctet + 5).0/24" - nsg = [ordered]@{ - name = "$($config.sre.nsgPrefix)_WEBAPPS".ToUpper() - rules = "sre-nsg-rules-webapps.json" - } - } - } - } - } - - # Firewall config - # --------------- - $config.sre.firewall = [ordered]@{ - routeTableName = "ROUTE-TABLE-SRE-$($config.sre.id)".ToUpper() - } - - # Storage config - # -------------- - if ($config.sre.id.Contains("-")) { - Add-LogMessage -Level Warning "The hyphen character is not allowed in storage account names and will be removed." 
- } - $storageRg = "$($config.sre.rgPrefix)_STORAGE".ToUpper() - $sreStoragePrefix = "$($config.shm.id)$($config.sre.id)".Replace("-", "") - $sreStorageSuffix = New-RandomLetters -SeedPhrase "$($config.sre.subscriptionName)$($config.sre.id)" - $config.sre.storage = [ordered]@{ - accessPolicies = [ordered]@{ - readOnly = [ordered]@{ - permissions = "rl" - } - readWrite = [ordered]@{ - permissions = "racwdl" - } - } - artifacts = [ordered]@{ - account = [ordered]@{ - name = "${sreStoragePrefix}artifacts${sreStorageSuffix}".ToLower() | Limit-StringLength -MaximumLength 24 -Silent - storageKind = "BlobStorage" - performance = $config.sre.storageTypeDefault # see https://docs.microsoft.com/en-us/azure/storage/common/storage-account-overview#types-of-storage-accounts for allowed types - accessTier = "Cool" - allowedIpAddresses = $sreConfigBase.deploymentIpAddresses ? @($sreConfigBase.deploymentIpAddresses) : "any" - } - containers = [ordered]@{ - sreArtifactsRDS = "sre-artifacts-rds" - sreScriptsRDS = "sre-scripts-rds" - } - rg = $storageRg - } - bootdiagnostics = [ordered]@{ - accountName = "${sreStoragePrefix}bootdiags${sreStorageSuffix}".ToLower() | Limit-StringLength -MaximumLength 24 -Silent - rg = $storageRg - } - userdata = [ordered]@{ - account = [ordered]@{ - name = "${sreStoragePrefix}userdata${sreStorageSuffix}".ToLower() | Limit-StringLength -MaximumLength 24 -Silent - storageKind = "FileStorage" - performance = $config.sre.storageTypeDefault.Contains("LRS") ? "Premium_LRS" : "Premium_ZRS" # see https://docs.microsoft.com/en-us/azure/storage/common/storage-account-overview#types-of-storage-accounts for allowed types - accessTier = "Hot" - rg = $storageRg - } - containers = [ordered]@{ - shared = [ordered]@{ - accessPolicyName = "readWrite" - mountType = "NFS" - sizeGb = "1024" - } - home = [ordered]@{ - accessPolicyName = "readWrite" - mountType = "NFS" - sizeGb = "1024" - } - } - } - persistentdata = [ordered]@{ - account = [ordered]@{ - name = "${sreStoragePrefix}data${srestorageSuffix}".ToLower() | Limit-StringLength -MaximumLength 24 -Silent - storageKind = "StorageV2" - performance = $config.sre.storageTypeDefault # see https://docs.microsoft.com/en-us/azure/storage/common/storage-account-overview#types-of-storage-accounts for allowed types - accessTier = "Hot" - allowedIpAddresses = $sreConfigBase.dataAdminIpAddresses ? 
@($sreConfigBase.dataAdminIpAddresses) : $shm.srdImage.build.nsg.allowedIpAddresses - } - containers = [ordered]@{ - backup = [ordered]@{ - accessPolicyName = "readWrite" - mountType = "BlobSMB" - } - ingress = [ordered]@{ - accessPolicyName = "readOnly" - mountType = "BlobSMB" - } - egress = [ordered]@{ - accessPolicyName = "readWrite" - mountType = "BlobSMB" - } - } - } - } - foreach ($containerName in $config.sre.storage.persistentdata.containers.Keys) { - $config.sre.storage.persistentdata.containers[$containerName].connectionSecretName = "sre-$($config.sre.id)-data-${containerName}-connection-$($config.sre.storage.persistentdata.containers[$containerName].accessPolicyName)".ToLower() - } - - - # Backup config - # ------------- - $config.sre.backup = [ordered]@{ - rg = "$($config.sre.rgPrefix)_BACKUP".ToUpper() - vault = [ordered]@{ - name = "bv-$($config.shm.id)-sre-$($config.sre.id)" - } - blob = [ordered]@{ - policy_name = "blobbackuppolicy" - } - disk = [ordered]@{ - policy_name = "diskbackuppolicy" - } - } - - # Secrets config - # -------------- - $config.sre.keyVault = [ordered]@{ - name = "kv-$($config.shm.id)-sre-$($config.sre.id)".ToLower() | Limit-StringLength -MaximumLength 24 - rg = "$($config.sre.rgPrefix)_SECRETS".ToUpper() - secretNames = [ordered]@{ - adminUsername = "$($config.sre.shortName)-vm-admin-username" - letsEncryptCertificate = "$($config.sre.shortName)-lets-encrypt-certificate" - npsSecret = "$($config.sre.shortName)-other-nps-secret" - } - } - - # SRE users - # --------- - $config.sre.users = [ordered]@{ - serviceAccounts = [ordered]@{ - ldapSearch = [ordered]@{ - name = "$($config.sre.domain.netbiosName) LDAP Search Service Account" - samAccountName = "$($config.sre.id)ldapsearch".ToLower() | Limit-StringLength -MaximumLength 20 - passwordSecretName = "$($config.sre.shortName)-other-service-account-password-ldap-search" - } - postgres = [ordered]@{ - name = "$($config.sre.domain.netbiosName) Postgres DB Service Account" - samAccountName = "$($config.sre.id)dbpostgres".ToLower() | Limit-StringLength -MaximumLength 20 - passwordSecretName = "$($config.sre.shortName)-db-service-account-password-postgres" - } - } - } - - # Apache Guacamole remote desktop - # ------------------------------- - $config.sre.remoteDesktop.rg = "$($config.sre.rgPrefix)_REMOTE_DESKTOP".ToUpper() - if ($config.sre.remoteDesktop.provider -eq "ApacheGuacamole") { - $config.sre.network.vnet.subnets.remoteDesktop.nsg = [ordered]@{ - name = "$($config.sre.nsgPrefix)_GUACAMOLE".ToUpper() - rules = "sre-nsg-rules-guacamole.json" - } - $config.sre.remoteDesktop.guacamole = [ordered]@{ - adminPasswordSecretName = "$($config.sre.shortName)-vm-admin-password-guacamole" - databaseAdminPasswordSecretName = "$($config.sre.shortName)-db-admin-password-guacamole" - vmName = "GUACAMOLE-SRE-$($config.sre.id)".ToUpper() - vmSize = "Standard_DS2_v2" - ip = Get-NextAvailableIpInRange -IpRangeCidr $config.sre.network.vnet.subnets.remoteDesktop.cidr -Offset 4 - disks = [ordered]@{ - os = [ordered]@{ - sizeGb = "128" - type = $config.sre.diskTypeDefault - } - } - } - } else { - Add-LogMessage -Level Fatal "Remote desktop type '$($config.sre.remoteDesktop.type)' was not recognised!" 
- } - # Construct the hostname and FQDN for each VM - foreach ($server in $config.sre.remoteDesktop.Keys) { - if (-not $config.sre.remoteDesktop[$server].vmName) { continue } - $config.sre.remoteDesktop[$server].hostname = $config.sre.remoteDesktop[$server].vmName - $config.sre.remoteDesktop[$server].fqdn = "$($config.sre.remoteDesktop[$server].vmName).$($config.shm.domain.fqdn)".ToLower() - } - - # Set the appropriate tier-dependent network rules for the remote desktop server - # ------------------------------------------------------------------------------ - $config.sre.remoteDesktop.networkRules = [ordered]@{} - # Inbound: which IPs can access the Safe Haven (if 'default' is given then apply sensible defaults) - if (@("anywhere", "all", "internet").Contains($sreConfigBase.inboundAccessFrom.ToLower())) { - $config.sre.remoteDesktop.networkRules.allowedSources = "Internet" - } else { - $config.sre.remoteDesktop.networkRules.allowedSources = @($sreConfigBase.inboundAccessFrom) - } - # Outbound: whether internet access is allowed (if 'default' is given then apply sensible defaults) - if ($sreConfigBase.outboundInternetAccess -eq "default") { - if (@("0", "1").Contains($config.sre.tier)) { - $config.sre.remoteDesktop.networkRules.outboundInternet = "Allow" - } elseif (@("2", "3", "4").Contains($config.sre.tier)) { - $config.sre.remoteDesktop.networkRules.outboundInternet = "Deny" - } - } elseif (@("yes", "allow", "permit").Contains($sreConfigBase.outboundInternetAccess.ToLower())) { - $config.sre.remoteDesktop.networkRules.outboundInternet = "Allow" - } elseif (@("no", "deny", "forbid").Contains($sreConfigBase.outboundInternetAccess.ToLower())) { - $config.sre.remoteDesktop.networkRules.outboundInternet = "Deny" - } else { - $config.sre.remoteDesktop.networkRules.outboundInternet = @($sreConfigBase.outboundInternet) - } - # Copy-and-paste - if (@("0", "1").Contains($config.sre.tier)) { - $config.sre.remoteDesktop.networkRules.copyAllowed = $true - $config.sre.remoteDesktop.networkRules.pasteAllowed = $true - } elseif (@("2", "3", "4").Contains($config.sre.tier)) { - $config.sre.remoteDesktop.networkRules.copyAllowed = $false - $config.sre.remoteDesktop.networkRules.pasteAllowed = $false - } - # Since we cannot 'Allow' the AzurePlatformDNS endpoint we set this flag which can be used to turn-off the section in the mustache template - $config.sre.remoteDesktop.networkRules.includeAzurePlatformDnsRule = ($config.sre.remoteDesktop.networkRules.outboundInternet -ne "Allow") - - - # CodiMD and Gitlab servers - # ------------------------- - $config.sre.webapps = [ordered]@{ - rg = "$($config.sre.rgPrefix)_WEBAPPS".ToUpper() - codimd = [ordered]@{ - adminPasswordSecretName = "$($config.sre.shortName)-vm-admin-password-codimd" - hostname = "CODIMD" - vmSize = "Standard_D2s_v3" - ip = Get-NextAvailableIpInRange -IpRangeCidr $config.sre.network.vnet.subnets.webapps.cidr -Offset 6 - osVersion = "Ubuntu-latest" - codimd = [ordered]@{ - dockerVersion = "2.5.3" - } - postgres = [ordered]@{ - passwordSecretName = "$($config.sre.shortName)-other-codimd-password-postgresdb" - dockerVersion = "16-alpine" - } - disks = [ordered]@{ - data = [ordered]@{ - sizeGb = "512" - type = $config.sre.diskTypeDefault - } - os = [ordered]@{ - sizeGb = "32" - type = $config.sre.diskTypeDefault - } - } - } - gitlab = [ordered]@{ - adminPasswordSecretName = "$($config.sre.shortName)-vm-admin-password-gitlab" - hostname = "GITLAB" - vmSize = "Standard_D2s_v3" - ip = Get-NextAvailableIpInRange -IpRangeCidr 
$config.sre.network.vnet.subnets.webapps.cidr -Offset 5 - rootPasswordSecretName = "$($config.sre.shortName)-other-gitlab-root-password" - osVersion = "Ubuntu-latest" - disks = [ordered]@{ - data = [ordered]@{ - sizeGb = "512" - type = $config.sre.diskTypeDefault - } - os = [ordered]@{ - sizeGb = "32" - type = $config.sre.diskTypeDefault - } - } - } - } - # Construct the hostname and FQDN for each VM - foreach ($server in $config.sre.webapps.Keys) { - if ($config.sre.webapps[$server] -IsNot [System.Collections.Specialized.OrderedDictionary]) { continue } - $config.sre.webapps[$server].fqdn = "$($config.sre.webapps[$server].hostname).$($config.sre.domain.fqdn)".ToLower() - $config.sre.webapps[$server].vmName = "$($config.sre.webapps[$server].hostname)-SRE-$($config.sre.id)".ToUpper() - } - - - # Databases - # --------- - $config.sre.databases = [ordered]@{ - rg = "$($config.sre.rgPrefix)_DATABASES".ToUpper() - enabled = $False - instances = @() - } - $dbConfig = @{ - MSSQL = @{port = "1433"; prefix = "MSSQL"; sku = "sqldev-gen2" } - PostgreSQL = @{port = "5432"; prefix = "PSTGRS"; sku = "Ubuntu-latest" } - } - $ipOffset = 4 - foreach ($databaseType in $sreConfigBase.databases) { - if (-not @($dbConfig.Keys).Contains($databaseType)) { - Add-LogMessage -Level Fatal "Database type '$databaseType' was not recognised!" - } - $databasePorts += $dbConfig[$databaseType].port - $config.sre.databases.instances += [ordered]@{ - adminPasswordSecretName = "$($config.sre.shortName)-vm-admin-password-$($databaseType.ToLower())" - dbAdminUsernameSecretName = "$($config.sre.shortName)-db-admin-username-$($databaseType.ToLower())" - dbAdminPasswordSecretName = "$($config.sre.shortName)-db-admin-password-$($databaseType.ToLower())" - vmName = "$($dbConfig[$databaseType].prefix)-$($config.sre.id)".ToUpper() | Limit-StringLength -MaximumLength 15 - type = $databaseType - ip = Get-NextAvailableIpInRange -IpRangeCidr $config.sre.network.vnet.subnets.databases.cidr -Offset $ipOffset - port = $dbConfig[$databaseType].port - sku = $dbConfig[$databaseType].sku - subnet = "databases" - vmSize = "Standard_DS2_v2" - disks = [ordered]@{ - data = [ordered]@{ - sizeGb = "1024" - type = $config.sre.diskTypeDefault - } - os = [ordered]@{ - sizeGb = "128" - type = $config.sre.diskTypeDefault - } - } - } - $config.sre.databases.enabled = $True - if ($databaseType -eq "MSSQL") { $config.sre.databases.instances[-1].enableSSIS = $true } - $ipOffset += 1 - } - - # Secure Research Desktop VMs - # --------------------------- - $config.sre.srd = [ordered]@{ - adminPasswordSecretName = "$($config.sre.shortName)-vm-admin-password-compute" - rg = "$($config.sre.rgPrefix)_COMPUTE".ToUpper() - vmImage = [ordered]@{ - type = $sreConfigBase.computeVmImage.type - version = $sreConfigBase.computeVmImage.version - } - vmSizeDefault = "Standard_D2s_v3" - disks = [ordered]@{ - os = [ordered]@{ - sizeGb = "default" - type = "StandardSSD_LRS" # explicitly override defaults and use SSD for SRD disks - } - scratch = [ordered]@{ - sizeGb = "1024" - type = "StandardSSD_LRS" # explicitly override defaults and use SSD for SRD disks - } - } - } - - # Package repositories - # -------------------- - if (@(0, 1).Contains([int]$config.sre.tier)) { - # For tiers 0 and 1 use pypi.org and cran.r-project.org directly. 
- $pypiUrl = "https://pypi.org" - $cranUrl = "https://cran.r-project.org" - $repositoryVNetName = $null - $repositoryVNetCidr = $null - } else { - # If using the Nexus proxy then the two repositories are hosted on the same VM - $repositoryConfig = $config.shm.repositories["tier$($config.sre.tier)"] - if ($repositoryConfig.proxies) { - $cranUrl = "http://$($repositoryConfig.proxies.many.ipAddress):80/repository/cran-proxy" - $pypiUrl = "http://$($repositoryConfig.proxies.many.ipAddress):80/repository/pypi-proxy" - $repositoryVNetName = $config.shm.network["vnetRepositoriesTier$($config.sre.tier)"].name - $repositoryVNetCidr = $config.shm.network["vnetRepositoriesTier$($config.sre.tier)"].cidr - # Repository mirrors use port 3128 (PyPI) or port 80 (CRAN) - } elseif ($repositoryConfig.mirrorsInternal) { - $cranUrl = "http://$($repositoryConfig.mirrorsInternal.cran.ipAddress)" - $pypiUrl = "http://$($repositoryConfig.mirrorsInternal.pypi.ipAddress):3128" - $repositoryVNetName = $config.shm.network["vnetRepositoriesTier$($config.sre.tier)"].name - $repositoryVNetCidr = $config.shm.network["vnetRepositoriesTier$($config.sre.tier)"].cidr - } else { - Add-LogMessage -Level Fatal "Unknown repository source for tier $($config.sre.tier) SRE!" - } - } - # We want to extract the hostname from PyPI URLs in any of the following forms - # 1. http://10.10.2.20:3128 => 10.10.2.20 - # 2. https://pypi.org => pypi.org - # 3. http://10.10.3.10:80/repository/pypi-proxy => 10.10.3.10 - $pypiHost = ($pypiUrl -match "https*:\/\/([^:]*)([:0-9]*).*") ? $Matches[1] : "" - $pypiIndex = $config.sre.nexus ? "${pypiUrl}/pypi" : $pypiUrl - $config.sre.repositories = [ordered]@{ - cran = [ordered]@{ - url = $cranUrl - } - pypi = [ordered]@{ - host = $pypiHost - index = $pypiIndex - indexUrl = "${pypiUrl}/simple" - } - network = [ordered]@{ - name = $repositoryVNetName - cidr = $repositoryVNetCidr - } - } - - # Apply overrides (if any exist) - # ------------------------------ - if ($sreConfigBase.overrides) { - Copy-HashtableOverrides -Source $sreConfigBase.overrides -Target $config - } - - return (ConvertTo-SortedHashtable -Sortable $config) -} -Export-ModuleMember -Function Get-SreConfig - - -# Get a list of resource groups belonging to a particular SRE -# ----------------------------------------------------------- -function Get-SreResourceGroups { - param( - [Parameter(Mandatory = $true, HelpMessage = "SRE config")] - [System.Collections.IDictionary]$sreConfig - ) - $originalContext = Get-AzContext - $potentialResourceGroups = Find-AllMatchingKeys -Hashtable $sreConfig.sre -Key "rg" - try { - $null = Set-AzContext -SubscriptionId $sreConfig.sre.subscriptionName -ErrorAction Stop - $availableResourceGroups = @(Get-AzResourceGroup | Where-Object { $_.ResourceGroupName -in $potentialResourceGroups }) - } finally { - $null = Set-AzContext -Context $originalContext -ErrorAction Stop - } - return $availableResourceGroups -} -Export-ModuleMember -Function Get-SreResourceGroups - - -# Show SRE or SHM full config -# --------------------- -function Show-FullConfig { - param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID")] - [string]$shmId, - [Parameter(Mandatory = $false, HelpMessage = "Enter SRE ID")] - [string]$sreId - ) - # Generate and return the full config for the SHM or SRE - if ($sreId -eq "") { - $config = Get-ShmConfig -shmId $shmId - } else { - $config = Get-SreConfig -shmId ${shmId} -sreId ${sreId} - } - Write-Output ($config | ConvertTo-Json -Depth 99) -} -Export-ModuleMember -Function Show-FullConfig 
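A quick, illustrative sanity check of the hostname-extraction regex used above (a sketch only; the example URLs are the three documented forms from the comment, not live endpoints):
"https://pypi.org" -match "https*:\/\/([^:]*)([:0-9]*).*"                              # $true, with $Matches[1] = 'pypi.org'
"http://10.10.2.20:3128" -match "https*:\/\/([^:]*)([:0-9]*).*"                        # $true, with $Matches[1] = '10.10.2.20'
"http://10.10.3.10:80/repository/pypi-proxy" -match "https*:\/\/([^:]*)([:0-9]*).*"    # $true, with $Matches[1] = '10.10.3.10'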
diff --git a/deployment/common/Cryptography.psm1 b/deployment/common/Cryptography.psm1 deleted file mode 100644 index 4d064dba65..0000000000 --- a/deployment/common/Cryptography.psm1 +++ /dev/null @@ -1,101 +0,0 @@ -Import-Module $PSScriptRoot/Logging -ErrorAction Stop - -# Get a list of approved SSL cipher suites -# ----------------------------------------- -function Get-SslCipherSuites { - # Start with 'recommended' ciphers from ciphersuite.info - $httpResponse = Invoke-RestMethod -Uri https://ciphersuite.info/api/cs/security/recommended -ErrorAction Stop - $recommended = $httpResponse.ciphersuites - - # ... however we also need at least one cipher from the 'secure' list since none of the 'recommended' ciphers are supported by TLS 1.2 - # We take the ones recommended by SSL Labs (https://github.com/ssllabs/research/wiki/SSL-and-TLS-Deployment-Best-Practices) - $response = Invoke-RestMethod -Uri https://ciphersuite.info/api/cs/security/secure -ErrorAction Stop - $ssllabsRecommended = @( - "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", - "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", - "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", - "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", - "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", - "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", - "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", - "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", - "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", - "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", - "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256", - "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384", - "TLS_DHE_RSA_WITH_AES_128_CBC_SHA", - "TLS_DHE_RSA_WITH_AES_256_CBC_SHA", - "TLS_DHE_RSA_WITH_AES_128_CBC_SHA256", - "TLS_DHE_RSA_WITH_AES_256_CBC_SHA256" - ) - $secure = $response.ciphersuites | Where-Object { $ssllabsRecommended.Contains($_.PSObject.Properties.Name) } - - # Construct a list of names in both OpenSSL and TLS format - $allowedCiphers = @($secure) + @($recommended) - return @{ - openssl = @($allowedCiphers | ForEach-Object { $_.PSObject.Properties.Value.openssl_name } | Where-Object { $_ }) - tls = @($allowedCiphers | ForEach-Object { $_.PSObject.Properties.Name } | Where-Object { $_ }) - } -} -Export-ModuleMember -Function Get-SslCipherSuites - - -# Generate a random alphanumeric password -# This gives a verifiably flat distribution across the characters in question -# We introduce bias through the password requirements which increase the proportion of digits -# -------------------------------------------------------------------------------------- -function New-Password { - param( - [int]$Length = 20 - ) - # Construct allowed character set - $alphaNumeric = [char[]](1..127) -match "[0-9A-Za-z]" -join "" - $rangeSize = $alphaNumeric.Length - 1 - - # Initialise common parameters - $cryptoRng = [System.Security.Cryptography.RandomNumberGenerator]::Create() - $fourByteArray = [System.Byte[]]::CreateInstance([System.Byte], 4) - $maxUint = [uint32]::MaxValue - $ceiling = [uint32]($maxUint - ($maxUint % $rangeSize)) # highest UInt that is evenly divisible by rangeSize - - # Convert random bytes into characters from permitted character set - $password = "" - foreach ($i in 1..$Length) { - # This should give a smoother distribution across the 0..
space than the previous method which used 'byte % ' and inherently favours lower numbers - while ($true) { - $cryptoRng.GetBytes($fourByteArray) - $randomUint = [BitConverter]::ToUInt32($fourByteArray, 0) - # Restrict to only values in the range that rangeSize divides evenly into - if ($randomUint -lt $ceiling) { - $password += $alphaNumeric[$randomUint % $rangeSize] - break - } - } - } - - # Require at least one of each character class - if (-not (($password -cmatch "[a-z]+") -and ($password -cmatch "[A-Z]+") -and ($password -cmatch "[0-9]+"))) { - $password = New-Password -Length $Length - } - return $password -} -Export-ModuleMember -Function New-Password - - -# Create a string of random letters -# Note that this is not cryptographically secure but does give a verifiably flat distribution across lower-case letters -# --------------------------------------------------------------------------------------------------------------------- -function New-RandomLetters { - param( - [int]$Length = 20, - [int]$Seed = 0, - [string]$SeedPhrase = $null - ) - if ($SeedPhrase) { - $Seed = [bigint](($SeedPhrase).ToCharArray() | ForEach-Object { [string][int]$_ } | Join-String) % [int32]::MaxValue - } - return ( -join ((97..122) | Get-Random -SetSeed $Seed -Count $Length | ForEach-Object { [char]$_ })) -} -Export-ModuleMember -Function New-RandomLetters diff --git a/deployment/common/DataStructures.psm1 b/deployment/common/DataStructures.psm1 deleted file mode 100644 index 093a6a19dc..0000000000 --- a/deployment/common/DataStructures.psm1 +++ /dev/null @@ -1,276 +0,0 @@ -Import-Module $PSScriptRoot/Logging -ErrorAction Stop - - -# Convert a CIDR suffix into a decimal netmask -# --------------------------------------------- -function Convert-CidrSuffixToDecimalMask { - param( - [Parameter(Mandatory = $true, HelpMessage = "Input CIDR")] - [uint32]$CidrSuffix - ) - # Construct the CIDR mask as a series of '1's followed by enough '0's to pad the length to 32 - # Now these numbers can be used one-by-one to construct a 32-bit integer - [uint32]$decimalMask = 0 - @(1) * $CidrSuffix + @(0) * (32 - $CidrSuffix) | ForEach-Object { $decimalMask = ($decimalMask -shl 1) + $_ } - return $decimalMask -} -Export-ModuleMember -Function Convert-CidrSuffixToDecimalMask - - -# Convert a CIDR range into its starting and ending IP addresses -# --------------------------------------------------------------- -function Convert-CidrToIpAddressRange { - param( - [Parameter(Mandatory = $true, HelpMessage = "Input range in CIDR notation")] - [string]$IpRangeCidr, - [Parameter(Mandatory = $false, HelpMessage = "Return results as decimal values instead of IP address strings")] - [switch]$AsDecimal = $false - ) - # Split the CIDR range into the IP address prefix and the CIDR suffix - $ipAddressPrefix, $cidrSuffix = $IpRangeCidr.Split("/") - - # Convert the IP prefix and CIDR suffix to decimal - [uint32]$ipPrefixDecimal = Convert-IpAddressToDecimal -IpAddress $ipAddressPrefix - [uint32]$cidrMaskDecimal = Convert-CidrSuffixToDecimalMask -Cidr $cidrSuffix - - # Get the starting and ending IP addresses - $ipStartDecimal = $ipPrefixDecimal -band $cidrMaskDecimal - $ipEndDecimal = $ipPrefixDecimal -bor (-bnot $cidrMaskDecimal) - - # Return as decimals or IP addresses - if ($AsDecimal) { - return @($ipStartDecimal, $ipEndDecimal) - } - return @( - (Convert-DecimalToIpAddress -IpDecimal $ipStartDecimal), - (Convert-DecimalToIpAddress -IpDecimal $ipEndDecimal) - ) -} -Export-ModuleMember -Function Convert-CidrToIpAddressRange - - -# Convert a decimal integer into an IP address
-# -------------------------------------------- -function Convert-DecimalToIpAddress { - param( - [Parameter(Mandatory = $true, HelpMessage = "Input decimal value")] - [string]$IpDecimal - ) - return [System.Net.IPAddress]::parse($IpDecimal).IPAddressToString -} -Export-ModuleMember -Function Convert-DecimalToIpAddress - - -# Convert an IP address into a decimal integer -# -------------------------------------------- -function Convert-IpAddressToDecimal { - param( - [Parameter(Mandatory = $true, HelpMessage = "Input IP address")] - [string]$IpAddress - ) - $IpOctets = Convert-IpAddressToOctets -IpAddress $IpAddress - return [uint32]($($IpOctets[0] -shl 24) + $($IpOctets[1] -shl 16) + $($IpOctets[2] -shl 8) + $IpOctets[3]) -} -Export-ModuleMember -Function Convert-IpAddressToDecimal - - -# Convert an IP address into a set of octets -# ------------------------------------------ -function Convert-IpAddressToOctets { - param( - [Parameter(Mandatory = $true, HelpMessage = "Input IP address")] - [string]$IpAddress - ) - $nSeparators = ($IpAddress.ToCharArray() | Where-Object { $_ -eq "." } | Measure-Object).Count - if ($nSeparators -ne 3) { - Add-LogMessage -Level Fatal "Expected three dot-separators but '$IpAddress' has $nSeparators!" - } - return @($IpAddress.Split(".") | ForEach-Object { [uint32]$_ }) -} -Export-ModuleMember -Function Convert-IpAddressToOctets - - -# Convert a set of IP octets into an IP address -# --------------------------------------------- -function Convert-OctetsToIpAddress { - param( - [Parameter(Mandatory = $true, HelpMessage = "Input IP octets")] - [uint32[]]$IpOctets - ) - if ($IpOctets.Count -ne 4) { - Add-LogMessage -Level Fatal "Expected four octets but '$IpOctets' has $($IpOctets.Count)!" - } - return $IpOctets -Join "." -} -Export-ModuleMember -Function Convert-OctetsToIpAddress - - -# Convert a file into a Base-64, GZipped string -# --------------------------------------------- -function ConvertTo-Base64GZip { - param( - [Parameter(Mandatory = $true, HelpMessage = "Path to file to be converted", ValueFromPipeline = $true)] - [ValidateNotNullOrEmpty()] - [string]$Path - ) - $xrdpCustomLogo = Get-Content $Path -Raw -AsByteStream - $outputStream = New-Object IO.MemoryStream - $gzipStream = New-Object System.IO.Compression.GZipStream($outputStream, [Io.Compression.CompressionMode]::Compress) - $gzipStream.Write($xrdpCustomLogo, 0, $xrdpCustomLogo.Length) - $gzipStream.Close() - $output = [Convert]::ToBase64String($outputStream.ToArray()) - $outputStream.Close() - return $output -} -Export-ModuleMember -Function ConvertTo-Base64GZip - - -# Convert an object to a Base-64 string -# ------------------------------------- -function ConvertTo-Base64 { - param( - [Parameter(Mandatory = $true, HelpMessage = "String to be converted to Base-64", ValueFromPipeline = $true)] - [AllowEmptyString()] - [string]$InputString - ) - return [Convert]::ToBase64String([System.Text.Encoding]::UTF8.GetBytes($InputString)) -} -Export-ModuleMember -Function ConvertTo-Base64 - - -# Convert a nested, sortable object into a sorted hashtable -# --------------------------------------------------------- -function ConvertTo-SortedHashtable { - param( - [Parameter(Mandatory = $true, HelpMessage = "Nested object to be sorted", ValueFromPipeline = $true)] - [AllowNull()][AllowEmptyString()] - $Sortable - ) - $hasKeysValues = [bool](($Sortable.PSObject.Properties.name -match "Keys") -and ($Sortable.PSObject.Properties.name -match "Values")) - if ($hasKeysValues) { - $OutputHashtable = [ordered]@{} - 
$Sortable.GetEnumerator() | Sort-Object -Property "Name" | ForEach-Object { $OutputHashtable.Add($_.Key, $(ConvertTo-SortedHashtable -Sortable $_.Value)) } - return $OutputHashtable - } elseif ($Sortable -is [System.Object[]]) { - return @($Sortable | ForEach-Object { ConvertTo-SortedHashtable $_ }) - } else { - return $Sortable - } -} -Export-ModuleMember -Function ConvertTo-SortedHashtable - - -# Overwrite the contents of one hash table with that of another -# ------------------------------------------------------------- -function Copy-HashtableOverrides { - param( - [Parameter(Mandatory = $true, HelpMessage = "Source hashtable")] - $Source, - [Parameter(Mandatory = $true, HelpMessage = "Target hashtable to override")] - $Target - ) - foreach ($sourcePair in $Source.GetEnumerator()) { - # If we hit a leaf then override the target with the source value - if ($sourcePair.Value -isnot [System.Collections.IDictionary]) { - $Target[$sourcePair.Key] = $sourcePair.Value - continue - } - # If the target already contains this key then continue recursively - if ($Target.Contains($sourcePair.Key)) { - Copy-HashtableOverrides $sourcePair.Value $Target[$sourcePair.Key] - # Otherwise create a new key in the target with value taken from the source - } else { - $Target[$sourcePair.Key] = $sourcePair.Value - } - } -} -Export-ModuleMember -Function Copy-HashtableOverrides - - -# Retrieve values of all keys matching the given pattern -# ------------------------------------------------------ -function Find-AllMatchingKeys { - param( - [Parameter(Mandatory = $true, HelpMessage = "Input hashtable")] - [System.Collections.IDictionary]$Hashtable, - [Parameter(Mandatory = $true, HelpMessage = "Key to look for")] - [String]$Key - ) - $output = @() - foreach ($entryPair in $Hashtable.GetEnumerator()) { - # If we hit a matching key then add its value to the output array - if ($entryPair.Key -like "$Key") { - $output += $entryPair.Value - } - # If we find a hashtable then walk that hashtable too - elseif ($entryPair.Value -is [System.Collections.IDictionary]) { - $output += Find-AllMatchingKeys -Hashtable $entryPair.Value -Key $Key - } - } - return $output -} -Export-ModuleMember -Function Find-AllMatchingKeys - - -# Retrieve value for a (possibly) multilevel key -# ---------------------------------------------- -function Get-MultilevelKey { - param( - [Parameter(Mandatory = $true, HelpMessage = "Input hashtable")] - [System.Collections.IDictionary]$Hashtable, - [Parameter(Mandatory = $true, HelpMessage = "Key to look for")] - [String]$Key - ) - if ($Hashtable.Contains($Key)) { - return $Hashtable[$Key] - } elseif ($Key.Contains(".")) { - $keyPrefix = $Key.Split(".")[0] - if ($Hashtable.Contains($keyPrefix)) { - $keySuffix = $Key.Split(".") | Select-Object -Skip 1 | Join-String -Separator "." 
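# For example, the key 'pypi.host' splits into the prefix 'pypi' and the suffix 'host'; the call below then
# recurses into $Hashtable['pypi'] with the remaining suffix, descending one level of nesting per call until
# it reaches the leaf value or falls through to $null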
- return Get-MultilevelKey -Hashtable $Hashtable[$keyPrefix] -Key $keySuffix - } - } - return $null -} -Export-ModuleMember -Function Get-MultilevelKey - - -# Truncate string at a given length -# --------------------------------- -function Limit-StringLength { - param( - [Parameter(Mandatory = $True, ValueFromPipeline = $True)] - [string]$InputString, - [Parameter(Mandatory = $True)] - [int]$MaximumLength, - [Parameter(Mandatory = $false)] - [Switch]$FailureIsFatal, - [Parameter(Mandatory = $false)] - [Switch]$Silent - ) - if ($InputString.Length -le $MaximumLength) { - return $InputString - } - if ($FailureIsFatal) { - Add-LogMessage -Level Fatal "'$InputString' has length $($InputString.Length) but must not exceed $MaximumLength!" - } - if (-Not $Silent) { - Add-LogMessage -Level Warning "Truncating '$InputString' to length $MaximumLength!" - } - return $InputString[0..($MaximumLength - 1)] -join "" -} -Export-ModuleMember -Function Limit-StringLength - - -# Retrieve value for a (possibly) multilevel key -# ---------------------------------------------- -function Wait-For { - param( - [Parameter(Mandatory = $true, HelpMessage = "Number of seconds to wait for")] - [int]$Seconds, - [Parameter(Mandatory = $true, HelpMessage = "Thing that we are waiting for")] - [string]$Target - ) - 1..$Seconds | ForEach-Object { Write-Progress -Activity "Waiting $Seconds seconds for $Target..." -Status "$_ seconds elapsed" -PercentComplete (100 * $_ / $Seconds); Start-Sleep 1 } -} -Export-ModuleMember -Function Wait-For diff --git a/deployment/common/Logging.psm1 b/deployment/common/Logging.psm1 deleted file mode 100644 index 14af7654d3..0000000000 --- a/deployment/common/Logging.psm1 +++ /dev/null @@ -1,107 +0,0 @@ -# Write coloured messages using the Information stream -# Adapted from https://blog.kieranties.com/2018/03/26/write-information-with-colours -# ---------------------------------------------------------------------------------- -function Write-InformationColoured { - [CmdletBinding()] - param( - [Parameter(Mandatory)] - [Object]$MessageData, - [ConsoleColor]$ForegroundColour = $Host.UI.RawUI.ForegroundColor, # Make sure we use the current colours by default - [ConsoleColor]$BackgroundColour = $Host.UI.RawUI.BackgroundColor, - [Switch]$NoNewline - ) - - # Construct a coloured message - $msg = [System.Management.Automation.HostInformationMessage]@{ - Message = $MessageData - ForegroundColor = $ForegroundColour - BackgroundColor = $BackgroundColour - NoNewline = $NoNewline.IsPresent - } - - # Write to the information stream - # See https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_output_streams?view=powershell-7.2 - Write-Information -InformationAction "Continue" $msg -} - - -# Add a message to the log -# ------------------------ -function Add-DeploymentLogMessages { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to deploy into")] - $ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Name of deployment to track")] - $DeploymentName, - [Parameter(Mandatory = $true, HelpMessage = "Error messages from template deployment")] - [AllowNull()] - $ErrorDetails - ) - $operations = Get-AzResourceGroupDeploymentOperation -ResourceGroupName $ResourceGroupName -DeploymentName $DeploymentName - foreach ($operation in $operations) { - $response = $operation.Properties.Response - foreach ($status in $response.content.Properties.instanceView.statuses) { - Add-LogMessage -Level Info "$($response.content.name): $($status.code)" - 
Write-Information -InformationAction "Continue" $status.message - } - foreach ($substatus in $response.content.Properties.instanceView.substatuses) { - Add-LogMessage -Level Info "$($response.content.name): $($substatus.code)" - Write-Information -InformationAction "Continue" $substatus.message - } - } - if ($ErrorDetails) { - foreach ($message in $ErrorDetails[0..2] ) { - Add-LogMessage -Level Failure "$message" - } - } -} -Export-ModuleMember -Function Add-DeploymentLogMessages - - -# Add a message to the log -# ------------------------ -function Add-LogMessage { - param( - [Parameter(Mandatory = $true)] - [ValidateNotNullOrEmpty()] - [string]$Message, - [Parameter(Mandatory = $false)] - [ValidateSet("Error", "Warning", "Info", "Success", "Failure", "InfoSuccess", "Fatal")] - [string]$Level = "Info", - [Parameter(Mandatory = $false)] - [Exception]$Exception - ) - # Format date for logging - $FormattedDate = Get-Date -Format "yyyy-MM-dd HH:mm:ss" - - # Write message to error, warning, or info - switch ($Level) { - "Error" { - Write-InformationColoured -ForegroundColour DarkRed "$FormattedDate [ ERROR]: $Message" - } - "Warning" { - Write-InformationColoured -ForegroundColour DarkYellow "$FormattedDate [WARNING]: $Message" - } - "Info" { - Write-InformationColoured -ForegroundColour DarkCyan "$FormattedDate [ INFO]: $Message" - } - "Success" { - Write-InformationColoured -ForegroundColour DarkGreen "$FormattedDate [SUCCESS]: [`u{2714}] $Message" - } - "Failure" { - Write-InformationColoured -ForegroundColour DarkRed "$FormattedDate [FAILURE]: [x] $Message" - } - "InfoSuccess" { - Write-InformationColoured -ForegroundColour DarkCyan "$FormattedDate [SUCCESS]: [`u{2714}] $Message" - } - "Fatal" { - Write-InformationColoured -ForegroundColour DarkRed "$FormattedDate [FAILURE]: [x] $Message" - if ($Exception) { - throw $Exception - } else { - throw "$Message" - } - } - } -} -Export-ModuleMember -Function Add-LogMessage diff --git a/deployment/common/RemoteCommands.psm1 b/deployment/common/RemoteCommands.psm1 deleted file mode 100644 index d5b9c2472f..0000000000 --- a/deployment/common/RemoteCommands.psm1 +++ /dev/null @@ -1,135 +0,0 @@ -Import-Module $PSScriptRoot/AzureCompute -ErrorAction Stop -Import-Module $PSScriptRoot/DataStructures -ErrorAction Stop -Import-Module $PSScriptRoot/Logging -ErrorAction Stop - - -# Update and reboot a machine -# --------------------------- -function Invoke-WindowsConfiguration { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of VM to run on")] - [string]$VMName, - [Parameter(Mandatory = $true, HelpMessage = "Name of resource group to deploy into")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Time zone to use")] - [string]$TimeZone, - [Parameter(Mandatory = $true, HelpMessage = "NTP server to use")] - [string]$NtpServer, - [Parameter(Mandatory = $false, HelpMessage = "Additional Powershell modules")] - [string[]]$AdditionalPowershellModules = @() - ) - # Install core Powershell modules - Add-LogMessage -Level Info "[ ] Installing core Powershell modules on '$VMName'" - $corePowershellScriptPath = Join-Path $PSScriptRoot "remote" "Install_Core_Powershell_Modules.ps1" - $null = Invoke-RemoteScript -Shell "PowerShell" -ScriptPath $corePowershellScriptPath -VMName $VMName -ResourceGroupName $ResourceGroupName - # Install additional Powershell modules - if ($AdditionalPowershellModules) { - Add-LogMessage -Level Info "[ ] Installing additional Powershell modules on '$VMName'" - $additionalPowershellScriptPath = 
Join-Path $PSScriptRoot "remote" "Install_Additional_Powershell_Modules.ps1" - $null = Invoke-RemoteScript -Shell "PowerShell" -ScriptPath $additionalPowershellScriptPath -VMName $VMName -ResourceGroupName $ResourceGroupName -Parameter @{"ModuleNamesB64" = ($AdditionalPowershellModules | ConvertTo-Json -Depth 99 | ConvertTo-Base64) } - } - # Set locale and run update script - Add-LogMessage -Level Info "[ ] Setting time/locale and installing updates on '$VMName'" - $InstallationScriptPath = Join-Path $PSScriptRoot "remote" "Configure_Windows.ps1" - $null = Invoke-RemoteScript -Shell "PowerShell" -ScriptPath $InstallationScriptPath -VMName $VMName -ResourceGroupName $ResourceGroupName -Parameter @{"TimeZone" = "$TimeZone"; "NTPServer" = "$NtpServer"; "Locale" = "en-GB" } - # Reboot the VM - Start-VM -Name $VMName -ResourceGroupName $ResourceGroupName -ForceRestart -} -Export-ModuleMember -Function Invoke-WindowsConfiguration - - -# Update LDAP secret in the local Active Directory -# ------------------------------------------------ -function Update-AdLdapSecret { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of DC that holds the local Active Directory")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Resource group for DC that holds the local Active Directory")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Subscription name for DC that holds the local Active Directory")] - [string]$SubscriptionName, - [Parameter(Mandatory = $true, HelpMessage = "Password for LDAP search account")] - [string]$LdapSearchPassword, - [Parameter(Mandatory = $true, HelpMessage = "SAM account name for LDAP search account")] - [string]$LdapSearchSamAccountName - ) - # Get original subscription - $originalContext = Get-AzContext - try { - $null = Set-AzContext -SubscriptionId $SubscriptionName -ErrorAction Stop - Add-LogMessage -Level Info "[ ] Setting LDAP secret in local AD (${Name})" - $params = @{ - ldapSearchSamAccountName = $LdapSearchSamAccountName - ldapSearchPasswordB64 = $LdapSearchPassword | ConvertTo-Base64 - } - $scriptPath = Join-Path $PSScriptRoot "remote" "ResetLdapPasswordOnAD.ps1" - $null = Invoke-RemoteScript -Shell "PowerShell" -ScriptPath $scriptPath -VMName $Name -ResourceGroupName $ResourceGroupName -Parameter $params - } finally { - # Switch back to original subscription - $null = Set-AzContext -Context $originalContext -ErrorAction Stop - } -} -Export-ModuleMember -Function Update-AdLdapSecret - - -# Update DNS record on the SHM for a VM -# ------------------------------------- -function Update-VMDnsRecords { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of primary DC VM")] - [string]$DcName, - [Parameter(Mandatory = $true, HelpMessage = "Resource group of primary DC VM")] - [string]$DcResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "FQDN for this SHM")] - [string]$BaseFqdn, - [Parameter(Mandatory = $true, HelpMessage = "Name of the SHM subscription")] - [string]$ShmSubscriptionName, - [Parameter(Mandatory = $true, HelpMessage = "Hostname of VM whose records need to be updated")] - [string]$VmHostname, - [Parameter(Mandatory = $true, HelpMessage = "IP address for this VM")] - [string]$VmIpAddress - ) - # Get original subscription - $originalContext = Get-AzContext - try { - $null = Set-AzContext -SubscriptionId $ShmSubscriptionName -ErrorAction Stop - Add-LogMessage -Level Info "[ ] Resetting DNS record for VM '$VmHostname'..." 
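# NetBIOS computer names may be at most 15 characters, so the hostname is truncated with Limit-StringLength
# before being passed to the remote DNS reset script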
- $params = @{ - Fqdn = $BaseFqdn - HostName = ($VmHostname | Limit-StringLength -MaximumLength 15) - IpAddress = $VMIpAddress - } - $scriptPath = Join-Path $PSScriptRoot "remote" "ResetDNSRecord.ps1" - $null = Invoke-RemoteScript -Shell "PowerShell" -ScriptPath $scriptPath -VMName $DcName -ResourceGroupName $DcResourceGroupName -Parameter $params - if ($?) { - Add-LogMessage -Level Success "Resetting DNS record for VM '$VmHostname' was successful" - } else { - Add-LogMessage -Level Failure "Resetting DNS record for VM '$VmHostname' failed!" - } - } finally { - # Switch back to original subscription - $null = Set-AzContext -Context $originalContext -ErrorAction Stop - } -} -Export-ModuleMember -Function Update-VMDnsRecords - - -# Update LDAP secret for a VM -# --------------------------- -function Update-VMLdapSecret { - param( - [Parameter(Mandatory = $true, HelpMessage = "VM name")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "VM resource group")] - [string]$ResourceGroupName, - [Parameter(Mandatory = $true, HelpMessage = "Password for LDAP search account")] - [string]$LdapSearchPassword - ) - Add-LogMessage -Level Info "[ ] Setting LDAP secret on SRD '${Name}'" - $params = @{ - ldapSearchPasswordB64 = $LdapSearchPassword | ConvertTo-Base64 - } - $scriptPath = Join-Path $PSScriptRoot "remote" "ResetLdapPasswordOnVm.sh" - $null = Invoke-RemoteScript -Shell "UnixShell" -ScriptPath $scriptPath -VMName $Name -ResourceGroupName $ResourceGroupName -Parameter $params -} -Export-ModuleMember -Function Update-VMLdapSecret diff --git a/deployment/common/Templates.psm1 b/deployment/common/Templates.psm1 deleted file mode 100644 index 4ba0cd64bf..0000000000 --- a/deployment/common/Templates.psm1 +++ /dev/null @@ -1,93 +0,0 @@ -Import-Module Poshstache -Global -ErrorAction Stop # Note that we need -Global as Poshstache uses `Get-Module` to check where it is isntalled -Import-Module $PSScriptRoot/DataStructures -ErrorAction Stop -Import-Module $PSScriptRoot/Logging -ErrorAction Stop - - -# Expand a mustache template -# Use the terminology from https://mustache.github.io/mustache.5.html -# ------------------------------------------------------------------- -function Expand-MustacheTemplate { - param( - [Parameter(Mandatory = $true, ParameterSetName = "ByFile", HelpMessage = "Mustache template to be expanded.")] - [string]$Template, - [Parameter(Mandatory = $true, ParameterSetName = "ByPath", HelpMessage = "Path to mustache template to be expanded.")] - [string]$TemplatePath, - [Parameter(Mandatory = $true, HelpMessage = "Hashtable (can be multi-level) with parameter key-value pairs.")] - [System.Collections.IDictionary]$Parameters - ) - # If we are given a path then we need to extract the content - if ($TemplatePath) { $Template = Get-Content $TemplatePath -Raw } - - # Define the delimiters - $MustacheOpen = "{" - $MustacheClose = "}" - $StartDelimiter = "${MustacheOpen}${MustacheOpen}" - $EndDelimiter = "${MustacheClose}${MustacheClose}" - - # Get all unique mustache tags - $tags = ($Template | Select-String -Pattern "$StartDelimiter[^${MustacheOpen}${MustacheClose}]*$EndDelimiter" -AllMatches).Matches.Value | ` - Where-Object { $_ -and ($_ -ne "{{.}}") } | ` - ForEach-Object { $_.Replace("#", "").Replace("/", "").Replace("?", "").Replace("^", "").Replace("&", "") } | ` - Get-Unique - - # As '.' 
is not an allowed character in standard Mustache syntax, we replace these with '_' in both the template and the parameter table - $PoshstacheParameters = @{} - foreach ($tag in $tags) { - $tagKey = $tag.Replace($StartDelimiter, "").Replace($EndDelimiter, "").Trim() - $normalisedTagKey = $tagKey.Replace(".", "_") - $Template = $Template.Replace($tagKey, $normalisedTagKey) - $PoshstacheParameters[$normalisedTagKey] = Get-MultilevelKey -Hashtable $Parameters -Key $tagKey - } - - # Use Poshstache to expand the template - return ConvertTo-PoshstacheTemplate -InputString $Template -ParametersObject (ConvertTo-Json -Depth 99 $PoshstacheParameters) -} -Export-ModuleMember -Function Expand-MustacheTemplate - - -# Expand a cloud-init file by inserting any referenced resources -# -------------------------------------------------------------- -function Expand-CloudInitResources { - param( - [Parameter(Mandatory = $true, ParameterSetName = "ByFile", HelpMessage = "Cloud-init template to be expanded.")] - [string]$Template, - [Parameter(Mandatory = $true, ParameterSetName = "ByPath", HelpMessage = "Path to cloud-init template to be expanded.")] - [string]$TemplatePath, - [Parameter(Mandatory = $true, HelpMessage = "Path to resource files.")] - [string]$ResourcePath - ) - # If we are given a path then we need to extract the content - if ($TemplatePath) { $Template = Get-Content $TemplatePath -Raw } - - # Insert resources into the cloud-init template - foreach ($resource in (Get-ChildItem $ResourcePath -Attributes !Directory)) { - $indent = $Template -split "`n" | Where-Object { $_ -match "{{$($resource.Name)}}" } | ForEach-Object { $_.Split("{{")[0] } | Select-Object -First 1 - $indentedContent = (Get-Content $resource.FullName -Raw -ErrorAction Stop) -split "`n" | Where-Object { $_ } | ForEach-Object { "${indent}$_" } | Join-String -Separator "`n" - $Template = $Template.Replace("${indent}{{$($resource.Name)}}", $indentedContent) - } - return $Template -} -Export-ModuleMember -Function Expand-CloudInitResources - - -# Get patched JSON from template -# ------------------------------ -function Get-JsonFromMustacheTemplate { - param( - [Parameter(Mandatory = $true, ParameterSetName = "ByFile", HelpMessage = "Mustache template to be expanded.")] - [string]$Template, - [Parameter(Mandatory = $true, ParameterSetName = "ByPath", HelpMessage = "Path to mustache template to be expanded.")] - [string]$TemplatePath, - [Parameter(Mandatory = $true, HelpMessage = "Hashtable (can be multi-level) with parameter key-value pairs.")] - [System.Collections.IDictionary]$Parameters, - [Parameter(Mandatory = $false, HelpMessage = "Return patched JSON as hashtable.")] - [switch]$AsHashtable - ) - if ($Template) { - $templateJson = Expand-MustacheTemplate -Template $Template -Parameters $Parameters - } else { - $templateJson = Expand-MustacheTemplate -TemplatePath $TemplatePath -Parameters $Parameters - } - return ($templateJson | ConvertFrom-Json -AsHashtable:$AsHashtable) -} -Export-ModuleMember -Function Get-JsonFromMustacheTemplate diff --git a/deployment/common/remote/Configure_Windows.ps1 b/deployment/common/remote/Configure_Windows.ps1 deleted file mode 100644 index fd842f7d3a..0000000000 --- a/deployment/common/remote/Configure_Windows.ps1 +++ /dev/null @@ -1,101 +0,0 @@ -# Don't make parameters mandatory as if there is any issue binding them, the script will prompt for them -# and remote execution will stall waiting for the non-present user to enter the missing parameter on the -# command line. 
This take up to 90 minutes to timeout, though you can try running resetState.cmd in -# C:\Packages\Plugins\Microsoft.CPlat.Core.RunCommandWindows\1.1.0 on the remote VM to cancel a stalled -# job, but this does not seem to have an immediate effect -# For details, see https://docs.microsoft.com/en-gb/azure/virtual-machines/windows/run-command -param( - [Parameter(Mandatory = $false, HelpMessage = "Time zone to use")] - [string]$TimeZone = "GMT Standard Time", - [Parameter(Mandatory = $false, HelpMessage = "NTP server to use")] - [string]$NTPServer = "time.google.com", - [Parameter(Mandatory = $false, HelpMessage = "Locale to use")] - [string]$Locale = "en-GB" -) - - -# Set locale -# ---------- -Write-Output " [ ] Setting locale..." -$GeoId = ([System.Globalization.CultureInfo]::GetCultures("InstalledWin32Cultures") | Where-Object { $_.Name -eq $Locale } | ForEach-Object { [System.Globalization.RegionInfo]$_.Name }).GeoId -Set-WinSystemLocale -SystemLocale $Locale -Set-WinHomeLocation -GeoId $GeoId -Set-Culture -CultureInfo $Locale -Set-WinUserLanguageList -LanguageList $Locale -Force -# Set-WinSystemLocale will not be applied until after a restart -# Set-Culture does not affect the current shell so we must spawn a new Powershell process -if (($(& (Get-Process -Id $PID).Path { (Get-Culture).Name }) -eq $Locale) -and ((Get-WinUserLanguageList)[0].LanguageTag -eq $Locale)) { - Write-Output " [o] Setting locale to '$Locale' succeeded" -} else { - Write-Output " ... Culture: $((Get-Culture).DisplayName)" - Write-Output " ... Home location: $((Get-WinHomeLocation).HomeLocation)" - Write-Output " ... Default language: $((Get-WinUserLanguageList)[0].Autonym)" - Write-Output " [x] Setting locale to '$Locale' failed!" -} - - -# Configure time zone -# ------------------- -if ($TimeZone) { - Write-Output " [ ] Setting time zone..." - Set-TimeZone -Name $TimeZone - if ($?) { - Write-Output " [o] Setting time zone to '$TimeZone' succeeded" - } else { - Write-Output " ... Time zone: $((Get-TimeZone).Id)" - Write-Output " [x] Setting time zone to '$TimeZone' failed!" - } -} else { - Write-Output " [x] Invalid time zone '$TimeZone' provided!" -} - - -# Configure NTP server -# These steps follow the instructions from https://support.microsoft.com/en-gb/help/816042/how-to-configure-an-authoritative-time-server-in-windows-server -# -------------------------------------------------------------------------------------------------------------------------------------------------------- -if ($NTPServer) { - Write-Output " [ ] Setting NTP server..." - $success = $true - # Change DateTime\Servers settings - # We should end up with exactly two DWORDs: 0th-server and default (pointing to 0th-server) - # ----------------------------------------------------------------------------------------- - Push-Location - Set-Location HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\DateTime\Servers - Remove-ItemProperty . -Name "*" - Set-ItemProperty . 0 $NTPServer - Set-ItemProperty . 
"(Default)" "0" - Pop-Location - $success = $success -and ( - (((Get-ItemProperty HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\DateTime\Servers).PSObject.Members | Where-Object { $_.MemberType -eq "NoteProperty" -and $_.Name -notlike "PS*" } | Measure-Object | Select-Object Count).Count -eq 2) -and - ((Get-ItemPropertyValue HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\DateTime\Servers -Name "0") -eq $NTPServer) -and - ((Get-ItemPropertyValue HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\DateTime\Servers -Name "(Default)") -eq "0") - ) - - # Change Services\W32Time settings - # -------------------------------- - Set-ItemProperty HKLM:\SYSTEM\CurrentControlSet\Services\W32Time\Parameters Type NTP - Set-ItemProperty HKLM:\SYSTEM\CurrentControlSet\Services\W32Time\Parameters NtpServer "$NTPServer,0x1" - Set-ItemProperty HKLM:\SYSTEM\CurrentControlSet\Services\W32Time\Config AnnounceFlags 0xA - Set-ItemProperty HKLM:\SYSTEM\CurrentControlSet\Services\W32Time\TimeProviders\NtpServer Enabled 1 - $success = $success -and ( - ((Get-ItemPropertyValue HKLM:\SYSTEM\CurrentControlSet\Services\W32Time\Parameters -Name "Type") -eq "NTP") -and - ((Get-ItemPropertyValue HKLM:\SYSTEM\CurrentControlSet\Services\W32Time\Parameters -Name "NtpServer") -eq "$NTPServer,0x1") -and - ((Get-ItemPropertyValue HKLM:\SYSTEM\CurrentControlSet\Services\W32Time\Config -Name "AnnounceFlags") -eq 0xA) -and - ((Get-ItemPropertyValue HKLM:\SYSTEM\CurrentControlSet\Services\W32Time\TimeProviders\NtpServer -Name "Enabled") -eq 1) - ) - - # Restart the Windows Time service - # -------------------------------- - Stop-Service W32Time - Start-Service W32Time - - # Check that settings were applied - # -------------------------------- - if ($success) { - Write-Output " [o] Setting NTP server to '$NTPServer' succeeded" - } else { - Write-Output " [x] Setting NTP server to '$NTPServer' failed!" - } -} else { - Write-Output " [x] Invalid NTP server '$NTPServer' provided!" -} diff --git a/deployment/common/remote/Install_Additional_Powershell_Modules.ps1 b/deployment/common/remote/Install_Additional_Powershell_Modules.ps1 deleted file mode 100644 index 5f1c737280..0000000000 --- a/deployment/common/remote/Install_Additional_Powershell_Modules.ps1 +++ /dev/null @@ -1,47 +0,0 @@ -# Don't make parameters mandatory as if there is any issue binding them, the script will prompt for them -# and remote execution will stall waiting for the non-present user to enter the missing parameter on the -# command line. This take up to 90 minutes to timeout, though you can try running resetState.cmd in -# C:\Packages\Plugins\Microsoft.CPlat.Core.RunCommandWindows\1.1.0 on the remote VM to cancel a stalled -# job, but this does not seem to have an immediate effect -# For details, see https://docs.microsoft.com/en-gb/azure/virtual-machines/windows/run-command -param( - [Parameter(Mandatory = $false, HelpMessage = "Base-64 encoding Powershell modules to install")] - [string]$ModuleNamesB64 = $null -) - - -# Deserialise Base-64 encoded variables -# ------------------------------------- -$moduleNames = @() -if ($ModuleNamesB64) { - $moduleNames = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($ModuleNamesB64)) | ConvertFrom-Json -} - - -# Get existing modules -# -------------------- -$existingModuleNames = Get-Module -ListAvailable | ForEach-Object { $_.Name } - - -# Install additional modules -# -------------------------- -foreach ($moduleName in $moduleNames) { - Write-Output "Installing $moduleName..." 
- Install-Module -Name $moduleName -AllowClobber -Force -AcceptLicense 2>&1 3>&1 | Out-Null - Update-Module -Name $moduleName -Force 2>&1 3>&1 | Out-Null - $installedModule = Get-Module -ListAvailable -Name $moduleName | Select-Object -First 1 - if ($installedModule) { - Write-Output " [o] $moduleName $($installedModule.Version.ToString()) is installed" - } else { - Write-Output " [x] Failed to install $moduleName!" - } -} - - -# Report any modules that were installed -# -------------------------------------- -Write-Output "`nNewly installed modules:" -$installedModules = Invoke-Command -ScriptBlock { Get-Module -ListAvailable | Where-Object { $_.Name -NotIn $existingModuleNames } } -foreach ($module in $installedModules) { - Write-Output " ... $($module.Name)" -} diff --git a/deployment/common/remote/Install_Core_Powershell_Modules.ps1 b/deployment/common/remote/Install_Core_Powershell_Modules.ps1 deleted file mode 100644 index 34c04ee180..0000000000 --- a/deployment/common/remote/Install_Core_Powershell_Modules.ps1 +++ /dev/null @@ -1,46 +0,0 @@ -# Get existing modules -# -------------------- -$existingModuleNames = Get-Module -ListAvailable | ForEach-Object { $_.Name } - - -# Install NuGet -# ------------- -$provider = "NuGet" -Write-Output "Installing $provider..." -Install-PackageProvider -Name $provider -Force 2>&1 | Out-Null -$installedProvider = Get-PackageProvider -ListAvailable -Name $provider | Select-Object -First 1 -if ($installedProvider) { - Write-Output " [o] $provider $($installedProvider.Version.ToString()) is installed" -} else { - Write-Output " [x] Failed to install $provider!" -} - - -# Add the PSGallery to the list of trusted repositories -# ----------------------------------------------------- -Set-PSRepository -Name PSGallery -InstallationPolicy Trusted - - -# Install core modules -# -------------------- -foreach ($moduleName in @("PackageManagement", "PowerShellGet", "PSWindowsUpdate")) { - Write-Output "Installing $moduleName..." - # NB. We skip publisher check as the PowerShellGet catalog signature is broken - Install-Module -Name $moduleName -Repository PSGallery -AllowClobber -SkipPublisherCheck -Force 2>&1 3>&1 | Out-Null - Update-Module -Name $moduleName -Force 2>&1 3>&1 | Out-Null - $installedModule = Get-Module -ListAvailable -Name $moduleName | Select-Object -First 1 - if ($installedModule) { - Write-Output " [o] $moduleName $($installedModule.Version.ToString()) is installed" - } else { - Write-Output " [x] Failed to install $moduleName!" - } -} - - -# Report any modules that were installed -# -------------------------------------- -Write-Output "`nNewly installed modules:" -$installedModules = Invoke-Command -ScriptBlock { Get-Module -ListAvailable | Where-Object { $_.Name -NotIn $existingModuleNames } } -foreach ($module in $installedModules) { - Write-Output " ... $($module.Name)" -} diff --git a/deployment/common/remote/ResetDNSRecord.ps1 b/deployment/common/remote/ResetDNSRecord.ps1 deleted file mode 100644 index 934878d312..0000000000 --- a/deployment/common/remote/ResetDNSRecord.ps1 +++ /dev/null @@ -1,42 +0,0 @@ -# Don't make parameters mandatory as if there is any issue binding them, the script will prompt for them -# and remote execution will stall waiting for the non-present user to enter the missing parameter on the -# command line. 
This can take up to 90 minutes to time out, though you can try running resetState.cmd in -# C:\Packages\Plugins\Microsoft.CPlat.Core.RunCommandWindows\1.1.0 on the remote VM to cancel a stalled -# job, but this does not seem to have an immediate effect -# For details, see https://docs.microsoft.com/en-gb/azure/virtual-machines/windows/run-command -param( - [Parameter(HelpMessage = "FQDN for the SRE", Mandatory = $false)] - [string]$Fqdn, - [Parameter(HelpMessage = "Host name of the VM", Mandatory = $false)] - [string]$HostName, - [Parameter(HelpMessage = "Desired IP Address", Mandatory = $false)] - [string]$IpAddress -) - -# Get the existing record -$ExistingDnsRecord = Get-DnsServerResourceRecord -ZoneName $Fqdn -Name $HostName -ErrorAction SilentlyContinue - -# Create the record if it does not exist -if (-not $ExistingDnsRecord) { - try { - Add-DnsServerResourceRecord -ZoneName $Fqdn -A -Name $HostName -IPv4Address $IpAddress - Write-Output " [o] Successfully added DNS record for '$HostName' in '$Fqdn'" - } catch [Microsoft.Management.Infrastructure.CimException] { - Write-Output " [x] Failed to add new DNS record for '$HostName' in '$Fqdn'!" - } - -# Update the record if it does exist -} else { - if ($ExistingDnsRecord.RecordData.IPv4Address.ToString() -eq $IpAddress) { - Write-Output " [o] DNS record for '$HostName' in '$Fqdn' is already set to '$IpAddress'" - } else { - try { - $NewDnsRecord = $ExistingDnsRecord.Clone() - $NewDnsRecord.RecordData.IPv4Address = [System.Net.IPAddress]::parse($IpAddress) - $null = Set-DnsServerResourceRecord -NewInputObject $NewDnsRecord -OldInputObject $ExistingDnsRecord -ZoneName $Fqdn -PassThru - Write-Output " [o] Successfully updated DNS record for '$HostName' in '$Fqdn' to point to '$IpAddress'" - } catch [Microsoft.Management.Infrastructure.CimException] { - Write-Output " [x] Failed to update DNS record for '$HostName' in '$Fqdn'!" - } - } -} diff --git a/deployment/common/remote/ResetLdapPasswordOnAD.ps1 b/deployment/common/remote/ResetLdapPasswordOnAD.ps1 deleted file mode 100644 index 3636717991..0000000000 --- a/deployment/common/remote/ResetLdapPasswordOnAD.ps1 +++ /dev/null @@ -1,22 +0,0 @@ -# Don't make parameters mandatory as if there is any issue binding them, the script will prompt for them -# and remote execution will stall waiting for the non-present user to enter the missing parameter on the -# command line. This can take up to 90 minutes to time out, though you can try running resetState.cmd in -# C:\Packages\Plugins\Microsoft.CPlat.Core.RunCommandWindows\1.1.0 on the remote VM to cancel a stalled -# job, but this does not seem to have an immediate effect -# For details, see https://docs.microsoft.com/en-gb/azure/virtual-machines/windows/run-command -param( - [Parameter(Mandatory = $false, HelpMessage = "SAM account name for LDAP search account")] - [string]$ldapSearchSamAccountName, - [Parameter(Mandatory = $false, HelpMessage = "Base-64 encoded password for LDAP search account")] - [string]$ldapSearchPasswordB64 -) - -# Deserialise Base-64 encoded variables -# ------------------------------------- -$ldapSearchPassword = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($ldapSearchPasswordB64)) - -# Reset password in Active Directory -# ---------------------------------- -Write-Output "Resetting password for '$ldapSearchSamAccountName'..."
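# Look up the account first (echoing its details into the script output for logging) before resetting its password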
-Get-ADUser -Filter "SamAccountName -eq '$ldapSearchSamAccountName'" -Set-ADAccountPassword -Identity $ldapSearchSamAccountName -NewPassword (ConvertTo-SecureString -AsPlainText "$ldapSearchPassword" -Force) diff --git a/deployment/common/remote/ResetLdapPasswordOnVm.sh b/deployment/common/remote/ResetLdapPasswordOnVm.sh deleted file mode 100644 index 1808e16eea..0000000000 --- a/deployment/common/remote/ResetLdapPasswordOnVm.sh +++ /dev/null @@ -1,9 +0,0 @@ -#! /bin/bash -# This script is designed to be deployed to an Azure Linux VM via -# the Powershell Invoke-AzVMRunCommand, which sets all variables -# passed in its -Parameter argument as environment variables -# It expects the following parameters: -# ldapSearchPasswordB64 (Base-64 encoded password for the LDAP search user account) - -echo "Resetting LDAP password in /etc/ldap.secret..." -echo $ldapSearchPasswordB64 | base64 -d > /etc/ldap.secret diff --git a/deployment/common/resources/audit.rules b/deployment/common/resources/audit.rules deleted file mode 100644 index 4219687aaf..0000000000 --- a/deployment/common/resources/audit.rules +++ /dev/null @@ -1,95 +0,0 @@ -# This file contains the auditctl rules that are loaded -# whenever the audit daemon is started via the initscripts. -# The rules are simply the parameters that would be passed -# to auditctl. - -# First rule - delete all --D - -# Increase the buffers to survive stress events. -# Make this bigger for busy systems --b 8192 - -# This determines how long to wait in bursts of events ---backlog_wait_time 0 - -# Set failure mode to syslog --f 1 - -# Record all sockets --a always,exit -F arch=b64 -F a0=2 -S socket -k dsh-socket - -# Record all connections --a always,exit -F arch=b64 -F a0=2 -S connect -k dsh-connect - -# 5.2.4 Record Events That Modify Date and Time Information --a always,exit -F arch=b64 -S adjtimex -S settimeofday -k dsh-time-change --a always,exit -F arch=b32 -S adjtimex -S settimeofday -S stime -k dsh-time-change --a always,exit -F arch=b64 -S clock_settime -k dsh-time-change --a always,exit -F arch=b32 -S clock_settime -k dsh-time-change --w /etc/localtime -p wa -k dsh-time-change - -# 5.2.5 Record Events That Modify User/Group Information --w /etc/group -p wa -k dsh-identity --w /etc/passwd -p wa -k dsh-identity --w /etc/gshadow -p wa -k dsh-identity --w /etc/shadow -p wa -k dsh-identity --w /etc/security/opasswd -p wa -k dsh-identity - -# 5.2.6 Record Events That Modify the System's Network Environment --a always,exit -F arch=b64 -S sethostname -S setdomainname -k dsh-system-locale --a always,exit -F arch=b32 -S sethostname -S setdomainname -k dsh-system-locale --w /etc/issue -p wa -k dsh-system-locale --w /etc/issue.net -p wa -k dsh-system-locale --w /etc/hosts -p wa -k dsh-system-locale --w /etc/sysconfig/network -p wa -k dsh-system-locale - -# 5.2.7 Record Events That Modify the System's Mandatory Access Controls --w /etc/selinux/ -p wa -k dsh-MAC-policy - -# 5.2.8 Collect Login and Logout Events --w /var/log/faillog -p wa -k dsh-logins --w /var/log/lastlog -p wa -k dsh-logins --w /var/log/tallylog -p wa -k dsh-logins - -# 5.2.9 Collect Session Initiation Information --w /var/run/utmp -p wa -k dsh-session --w /var/log/wtmp -p wa -k dsh-session --w /var/log/btmp -p wa -k dsh-session - -# 5.2.10 Collect Discretionary Access Control Permission Modification Events --a always,exit -F arch=b64 -S chmod -S fchmod -S fchmodat -F auid>=500 -F auid!=4294967295 -k dsh-perm_mod --a always,exit -F arch=b32 -S chmod -S fchmod -S fchmodat -F auid>=500 -F 
auid!=4294967295 -k dsh-perm_mod --a always,exit -F arch=b64 -S chown -S fchown -S fchownat -S lchown -F auid>=500 -F auid!=4294967295 -k dsh-perm_mod --a always,exit -F arch=b32 -S chown -S fchown -S fchownat -S lchown -F auid>=500 -F auid!=4294967295 -k dsh-perm_mod --a always,exit -F arch=b64 -S setxattr -S lsetxattr -S fsetxattr -S removexattr -S lremovexattr -S fremovexattr -F auid>=500 -F auid!=4294967295 -k dsh-perm_mod --a always,exit -F arch=b32 -S setxattr -S lsetxattr -S fsetxattr -S removexattr -S lremovexattr -S fremovexattr -F auid>=500 -F auid!=4294967295 -k dsh-perm_mod - -# 5.2.11 Collect Unsuccessful Unauthorized Access Attempts to Files --a always,exit -F arch=b64 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=500 -F auid!=4294967295 -k dsh-access --a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=500 -F auid!=4294967295 -k dsh-access --a always,exit -F arch=b64 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=500 -F auid!=4294967295 -k dsh-access --a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=500 -F auid!=4294967295 -k dsh-access - -# 5.2.13 Collect Successful File System Mounts --a always,exit -F arch=b64 -S mount -F auid>=500 -F auid!=4294967295 -k dsh-mounts --a always,exit -F arch=b32 -S mount -F auid>=500 -F auid!=4294967295 -k dsh-mounts - -# 5.2.14 Collect File Deletion Events by User --a always,exit -F arch=b64 -S unlink -S unlinkat -S rename -S renameat -F auid>=500 -F auid!=4294967295 -k dsh-delete --a always,exit -F arch=b32 -S unlink -S unlinkat -S rename -S renameat -F auid>=500 -F auid!=4294967295 -k dsh-delete - -# 5.2.15 Collect Changes to System Administration Scope --w /etc/sudoers -p wa -k dsh-scope - -# 5.2.16 Collect System Administrator Actions --w /var/log/sudo.log -p wa -k dsh-actions - -# 5.2.17 Collect Kernel Module Loading and Unloading --w /sbin/insmod -p x -k dsh-modules --w /sbin/rmmod -p x -k dsh-modules --w /sbin/modprobe -p x -k dsh-modules --a always,exit -F arch=b64 -S init_module -S delete_module -k dsh-modules - -# 5.2.18 Make the Audit Configuration Immutable --e 2 diff --git a/deployment/common/resources/clamav-clamdscan.timer b/deployment/common/resources/clamav-clamdscan.timer deleted file mode 100644 index 0e4f46fd48..0000000000 --- a/deployment/common/resources/clamav-clamdscan.timer +++ /dev/null @@ -1,12 +0,0 @@ -[Unit] -Description=Run ClamAV scan every day - -[Timer] -# Will run daily at 00:01:00 -OnCalendar=*-*-* 00:01:00 -AccuracySec=1h -# Will run if a job was missed (e.g. 
due to system being powered down) -Persistent=true - -[Install] -WantedBy=timers.target \ No newline at end of file diff --git a/deployment/common/resources/clamd.conf b/deployment/common/resources/clamd.conf deleted file mode 100644 index f29060a872..0000000000 --- a/deployment/common/resources/clamd.conf +++ /dev/null @@ -1,4 +0,0 @@ -# Prevent access to infected files -OnAccessPrevention yes -OnAccessExcludeUname clamav -OnAccessExcludeRootUID yes \ No newline at end of file diff --git a/deployment/safe_haven_management_environment/arm_templates/shm-dc-template.json b/deployment/safe_haven_management_environment/arm_templates/shm-dc-template.json deleted file mode 100644 index caf28524f7..0000000000 --- a/deployment/safe_haven_management_environment/arm_templates/shm-dc-template.json +++ /dev/null @@ -1,353 +0,0 @@ -{ - "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "parameters": { - "administratorPassword": { - "type": "securestring", - "metadata": { - "description": "Password for domain administrator" - } - }, - "administratorUsername": { - "type": "string", - "metadata": { - "description": "Username for domain administrator" - } - }, - "bootDiagnosticsAccountName": { - "type": "string", - "metadata": { - "description": "Name of storage account used for boot diagnostics" - } - }, - "dc1HostName": { - "type": "string", - "metadata": { - "description": "Hostname for DC1" - } - }, - "dc1IpAddress": { - "type": "string", - "metadata": { - "description": "Private IP address for DC1" - } - }, - "dc1VmName": { - "type": "string", - "metadata": { - "description": "VM name of DC1" - } - }, - "dc2HostName": { - "type": "string", - "metadata": { - "description": "Hostname for DC2" - } - }, - "dc2IpAddress": { - "type": "string", - "metadata": { - "description": "Private IP address for DC2" - } - }, - "dc2VmName": { - "type": "string", - "metadata": { - "description": "VM name of DC2" - } - }, - "externalDnsResolverIpAddress": { - "type": "string", - "metadata": { - "Description": "IP address for the external (Azure) DNS resolver" - } - }, - "shmId": { - "type": "string", - "metadata": { - "description": "Name of SHM" - } - }, - "virtualNetworkName": { - "type": "string", - "metadata": { - "description": "Name of virtual network to provision these VMs" - } - }, - "virtualNetworkResourceGroupName": { - "type": "string", - "metadata": { - "description": "Name of resource group that is associated with the virtual network above" - } - }, - "virtualNetworkSubnetName": { - "type": "string", - "metadata": { - "description": "Name of subnet where you want to provision this VM" - } - }, - "vmOsDiskSizeGb": { - "type": "int", - "metadata": { - "description": "Size of DC2 OS disk in GB" - } - }, - "vmOsDiskType": { - "type": "string", - "metadata": { - "description": "Type of DC2 OS disk" - } - }, - "vmSize": { - "type": "string", - "metadata": { - "description": "VM size of DC2" - } - } - }, - "variables": { - "dc1nic": "[concat(parameters('dc1VmName'),'-','NIC')]", - "dc2nic": "[concat(parameters('dc2VmName'),'-','NIC')]", - "avsetname": "[concat('AVSET-SHM-', toUpper(parameters('shmId')), '-VM-DC')]", - "vnetID": "[resourceId(parameters('virtualNetworkResourceGroupName'), 'Microsoft.Network/virtualNetworks', parameters('virtualNetworkName'))]", - "subnetId": "[concat(variables('vnetID'), '/subnets/', parameters('virtualNetworkSubnetName'))]" - }, - "resources": [ - { - "type": "Microsoft.Compute/availabilitySets", - "sku": { 
- "name": "Aligned" - }, - "name": "[variables('avsetname')]", - "apiVersion": "2019-07-01", - "location": "[resourceGroup().location]", - "scale": null, - "properties": { - "platformUpdateDomainCount": 2, - "platformFaultDomainCount": 2 - } - }, - { - "type": "Microsoft.Compute/virtualMachines", - "name": "[parameters('dc1VmName')]", - "apiVersion": "2021-11-01", - "location": "[resourceGroup().location]", - "scale": null, - "properties": { - "availabilitySet": { - "id": "[resourceId('Microsoft.Compute/availabilitySets', variables('avsetname'))]" - }, - "hardwareProfile": { - "vmSize": "[parameters('vmSize')]" - }, - "storageProfile": { - "imageReference": { - "publisher": "MicrosoftWindowsServer", - "offer": "WindowsServer", - "sku": "2022-Datacenter", - "version": "latest" - }, - "osDisk": { - "osType": "Windows", - "name": "[concat(parameters('dc1VmName'),'-OS-DISK')]", - "createOption": "FromImage", - "caching": "ReadWrite", - "writeAcceleratorEnabled": false, - "managedDisk": { - "storageAccountType": "[parameters('vmOsDiskType')]" - }, - "diskSizeGB": "[parameters('vmOsDiskSizeGb')]" - } - }, - "osProfile": { - "computerName": "[parameters('dc1HostName')]", - "adminUsername": "[parameters('administratorUsername')]", - "adminPassword": "[parameters('administratorPassword')]", - "windowsConfiguration": { - "enableAutomaticUpdates": true, - "provisionVMAgent": true - }, - "secrets": [] - }, - "networkProfile": { - "networkInterfaces": [ - { - "id": "[resourceId('Microsoft.Network/networkInterfaces', variables('dc1nic'))]", - "properties": { - "primary": true - } - } - ] - }, - "diagnosticsProfile": { - "bootDiagnostics": { - "enabled": true, - "storageUri": "[concat('https://', parameters('bootDiagnosticsAccountName'), '.blob.core.windows.net/')]" - } - } - }, - "dependsOn": [ - "[resourceId('Microsoft.Compute/availabilitySets', variables('avsetname'))]", - "[resourceId('Microsoft.Network/networkInterfaces', variables('dc1nic'))]" - ] - }, - { - "type": "Microsoft.Network/networkInterfaces", - "name": "[variables('dc1nic')]", - "apiVersion": "2020-05-01", - "location": "[resourceGroup().location]", - "scale": null, - "properties": { - "ipConfigurations": [ - { - "name": "ipconfig1", - "properties": { - "privateIPAddress": "[parameters('dc1IpAddress')]", - "privateIPAllocationMethod": "Static", - "subnet": { - "id": "[variables('subnetId')]" - }, - "primary": true, - "privateIPAddressVersion": "IPv4" - } - } - ], - "dnsSettings": { - "dnsServers": ["[parameters('dc1IpAddress')]", "[parameters('dc2IpAddress')]", "[parameters('externalDnsResolverIpAddress')]"] - }, - "enableAcceleratedNetworking": false, - "enableIPForwarding": false, - "primary": true, - "tapConfigurations": [] - }, - "dependsOn": [] - }, - { - "type": "Microsoft.Compute/virtualMachines/extensions", - "name": "[concat(parameters('dc1VmName'), '/', 'bginfo')]", - "apiVersion": "2019-07-01", - "location": "[resourceGroup().location]", - "scale": null, - "properties": { - "autoUpgradeMinorVersion": true, - "publisher": "Microsoft.Compute", - "type": "bginfo", - "typeHandlerVersion": "2.1" - }, - "dependsOn": [ - "[resourceId('Microsoft.Compute/virtualMachines', parameters('dc1VmName'))]" - ] - }, - { - "type": "Microsoft.Compute/virtualMachines", - "name": "[parameters('dc2VmName')]", - "apiVersion": "2021-11-01", - "location": "[resourceGroup().location]", - "scale": null, - "properties": { - "availabilitySet": { - "id": "[resourceId('Microsoft.Compute/availabilitySets', variables('avsetname'))]" - }, - "hardwareProfile": 
{ - "vmSize": "[parameters('vmSize')]" - }, - "storageProfile": { - "imageReference": { - "publisher": "MicrosoftWindowsServer", - "offer": "WindowsServer", - "sku": "2022-Datacenter", - "version": "latest" - }, - "osDisk": { - "osType": "Windows", - "name": "[concat(parameters('dc2VmName'),'-OS-DISK')]", - "createOption": "FromImage", - "caching": "ReadWrite", - "writeAcceleratorEnabled": false, - "managedDisk": { - "storageAccountType": "[parameters('vmOsDiskType')]" - }, - "diskSizeGB": "[parameters('vmOsDiskSizeGb')]" - } - }, - "osProfile": { - "computerName": "[parameters('dc2HostName')]", - "adminUsername": "[parameters('administratorUsername')]", - "adminPassword": "[parameters('administratorPassword')]", - "windowsConfiguration": { - "enableAutomaticUpdates": true, - "provisionVMAgent": true - }, - "secrets": [] - }, - "networkProfile": { - "networkInterfaces": [ - { - "id": "[resourceId('Microsoft.Network/networkInterfaces', variables('dc2nic'))]", - "properties": { - "primary": true - } - } - ] - }, - "diagnosticsProfile": { - "bootDiagnostics": { - "enabled": true, - "storageUri": "[concat('https://', parameters('bootDiagnosticsAccountName'), '.blob.core.windows.net', '/')]" - } - } - }, - "dependsOn": [ - "[resourceId('Microsoft.Compute/availabilitySets', variables('avsetname'))]", - "[resourceId('Microsoft.Network/networkInterfaces', variables('dc2nic'))]" - ] - }, - { - "type": "Microsoft.Network/networkInterfaces", - "name": "[variables('dc2nic')]", - "apiVersion": "2020-05-01", - "location": "[resourceGroup().location]", - "scale": null, - "properties": { - "ipConfigurations": [ - { - "name": "ipconfig1", - "properties": { - "privateIPAddress": "[parameters('dc2IpAddress')]", - "privateIPAllocationMethod": "Static", - "subnet": { - "id": "[variables('subnetId')]" - }, - "primary": true, - "privateIPAddressVersion": "IPv4" - } - } - ], - "dnsSettings": { - "dnsServers": ["[parameters('dc1IpAddress')]", "[parameters('dc2IpAddress')]", "[parameters('externalDnsResolverIpAddress')]"] - }, - "enableAcceleratedNetworking": false, - "enableIPForwarding": false, - "primary": true, - "tapConfigurations": [] - }, - "dependsOn": [] - }, - { - "type": "Microsoft.Compute/virtualMachines/extensions", - "name": "[concat(parameters('dc2VmName'), '/', 'bginfo')]", - "apiVersion": "2019-07-01", - "location": "[resourceGroup().location]", - "scale": null, - "properties": { - "autoUpgradeMinorVersion": true, - "publisher": "Microsoft.Compute", - "type": "bginfo", - "typeHandlerVersion": "2.1" - }, - "dependsOn": [ - "[resourceId('Microsoft.Compute/virtualMachines', parameters('dc2VmName'))]" - ] - } - ] -} \ No newline at end of file diff --git a/deployment/safe_haven_management_environment/cloud_init/cloud-init-repository-mirror-external-cran.mustache.yaml b/deployment/safe_haven_management_environment/cloud_init/cloud-init-repository-mirror-external-cran.mustache.yaml deleted file mode 100644 index 9f4a567776..0000000000 --- a/deployment/safe_haven_management_environment/cloud_init/cloud-init-repository-mirror-external-cran.mustache.yaml +++ /dev/null @@ -1,260 +0,0 @@ -#cloud-config - -# We know that exactly one data disk will be attached to this VM and we start numbering from lun0 -disk_setup: - /dev/disk/azure/scsi1/lun0: - table_type: gpt - layout: true - overwrite: true - -fs_setup: - - device: /dev/disk/azure/scsi1/lun0 - partition: auto - filesystem: ext4 - -mounts: - - [/dev/disk/azure/scsi1/lun0-part1, /datadrive, ext4, "defaults,nofail"] - -# Configure apt repositories -apt: - 
preserve_sources_list: true - -# List of packages to install with apt-get -packages: - - auditd - - clamav - - clamav-base - - clamav-daemon - - clamav-freshclam - - clamav-unofficial-sigs - - openssh-server - - rsync -package_update: true -package_upgrade: true - -# Initialise an empty file `internal_mirror_ip_addresses.txt` -# When internal mirrors are deployed, they add their IP address to this file -# Whenever `push_to_internal_mirrors.sh` is run, it will try to rsync to all of the IP addresses in the file -# NB. Since write_files runs before user creation, only pre-existing users can be assigned as file owners -write_files: - - path: "/etc/apt/apt.conf.d/00proxy" - permissions: "0444" - content: | - Acquire::http::Proxy "http://{{monitoring.updateServers.linux.ip}}:8000"; - - - path: "/etc/audit/rules.d/audit.rules" - permissions: "0400" - content: | - {{audit.rules}} - - - path: "/etc/clamav/clamd.conf" - permissions: "0644" - append: true - content: | - {{clamd.conf}} - - - path: "/etc/cron.d/clamav-freshclam" - permissions: "0644" - content: | - # Run every day at a fixed time - {{monitoring.updateServers.schedule.daily_definition_updates.minute}} {{monitoring.updateServers.schedule.daily_definition_updates.hour}} * * * freshclam - - - path: "/etc/cron.d/push-to-internal-mirrors" - permissions: "0644" - content: | - # Internal update (rsync) every day at 2am - 0 2 * * * mirrordaemon ~mirrordaemon/push_to_internal_mirrors.sh - - - path: "/etc/cron.d/pull-from-internet" - permissions: "0644" - content: | - # External update (rsync from CRAN) every 6 hours - 0 */6 * * * mirrordaemon ~mirrordaemon/pull_from_internet.sh - - - path: "/etc/cron.d/pull-then-push" - permissions: "0644" - content: | - # Run both jobs on every reboot - @reboot mirrordaemon ~mirrordaemon/pull_then_push.sh - - - path: "/etc/systemd/system/clamav-clamonacc.service" - permissions: "0644" - content: | - {{clamav-clamonacc.service}} - - - path: "/etc/systemd/system/clamav-clamdscan.service" - permissions: "0644" - content: | - {{clamav-clamdscan.service}} - - - path: "/etc/systemd/system/clamav-clamdscan.timer" - permissions: "0644" - content: | - {{clamav-clamdscan.timer}} - - - path: "/home/mirrordaemon/internal_mirror_ip_addresses.txt" - permissions: "0600" - - - path: "/home/mirrordaemon/package_allowlist.txt" - permissions: "0600" - content: | - {{allowlist-full-r-cran-tier3.list}} - - - path: "/home/mirrordaemon/push_to_internal_mirrors.sh" - permissions: "0700" - content: | - #! /bin/bash - # rsync: make the destination look like the source - # -p preserve permissions - # -r recursive - # -t preserve times - # -l follow symlinks - # -v verbose - # --delete delete files present in destination but not source - # --progress show progress - echo "[PushToInternal] Found $(cat /home/mirrordaemon/internal_mirror_ip_addresses.txt | wc -l | xargs) internal mirrors" | tee >(logger -t SafeHaven) - for IP_ADDRESS in $(cat /home/mirrordaemon/internal_mirror_ip_addresses.txt); do - echo "[PushToInternal] Started pushing to the internal mirror at ${IP_ADDRESS}..." 
| tee >(logger -t SafeHaven) - START_TIME=$(date +%s) - rsync -prtlv --delete --progress /datadrive/mirrordaemon/www/cran/* mirrordaemon@${IP_ADDRESS}:/datadrive/mirrordaemon/www/cran 2>&1 | awk '{print "[PushToInternal] " $0}' | tee >(logger -t SafeHaven) - ELAPSED=$(date -u -d "0 $(date +%s) seconds - $START_TIME seconds" +"%H:%M:%S") - echo "[PushToInternal] Finished pushing to the internal mirror at $IP_ADDRESS after $ELAPSED" | tee >(logger -t SafeHaven) - done - - - path: "/home/mirrordaemon/pull_from_internet.sh" - permissions: "0700" - content: | - #! /bin/bash - TIER="{{perInstance.tier}}" - echo "[PullFromInternet] Started pulling from the internet..." | tee >(logger -t SafeHaven) - START_TIME=$(date +%s) - if [ "$TIER" == "tier2" ]; then - # Download all files - rsync -rtlvz --delete --delete-excluded --exclude=bin/windows/* --exclude=bin/macos*/* --progress cran.r-project.org::CRAN /datadrive/mirrordaemon/www/cran 2>&1 | awk '{print "[PullFromInternet] " $0}' | tee >(logger -t SafeHaven) - else - # Download all allowed packages (which might be none) - ALLOWED_PACKAGES=$(grep -v "^#" /home/mirrordaemon/package_allowlist.txt) - # Note that there is a server-side timeout (30s) which causes problems for complicated commands - # By splitting into subdirectories we reduce the complexity of each rsync command - # Additionally, using an initial '/' anchors the search path at the directory root, speeding up calculation time - # Directory: /bin - INCLUDE_DIRS="" - for RPACKAGE in $ALLOWED_PACKAGES; do - INCLUDE_DIRS="${INCLUDE_DIRS} --include=/linux/ubuntu/*/${RPACKAGE}_*" - done - mkdir -p /datadrive/mirrordaemon/www/cran/bin - rsync -rtlvz --delete --delete-excluded --prune-empty-dirs --progress --include='*/' --include='/*' $INCLUDE_DIRS --exclude='*' cran.r-project.org::CRAN/bin/ /datadrive/mirrordaemon/www/cran/bin 2>&1 | awk '{print "[PullFromInternet] " $0}' | tee >(logger -t SafeHaven) - # Directory: /src - INCLUDE_DIRS="--include=/contrib/PACKAGES" - for RPACKAGE in $ALLOWED_PACKAGES; do - INCLUDE_DIRS="${INCLUDE_DIRS} --include=/contrib/${RPACKAGE}_* --include=/contrib/Archive/${RPACKAGE}/*" - done - mkdir -p /datadrive/mirrordaemon/www/cran/src - rsync -rtlvz --delete --delete-excluded --prune-empty-dirs --progress --include='*/' --include='/*' $INCLUDE_DIRS --exclude='*' cran.r-project.org::CRAN/src/ /datadrive/mirrordaemon/www/cran/src 2>&1 | awk '{print "[PullFromInternet] " $0}' | tee >(logger -t SafeHaven) - # Directory: /web - INCLUDE_DIRS="--include=/packages/index.html" - for RPACKAGE in $ALLOWED_PACKAGES; do - INCLUDE_DIRS="${INCLUDE_DIRS} --include=/checks/check_results_${RPACKAGE}.html --include=/dcmeta/${RPACKAGE}.xml --include=/packages/${RPACKAGE}/***" - done - mkdir -p /datadrive/mirrordaemon/www/cran/web - rsync -rtlvz --delete --delete-excluded --prune-empty-dirs --progress --include='*/' --include='/*' $INCLUDE_DIRS --exclude='*' cran.r-project.org::CRAN/web/ /datadrive/mirrordaemon/www/cran/web 2>&1 | awk '{print "[PullFromInternet] " $0}' | tee >(logger -t SafeHaven) - # Top-level directory. Note that delete must *NOT* be enabled here. 
- rsync -rtlvz --prune-empty-dirs --progress --exclude '*/*' cran.r-project.org::CRAN /datadrive/mirrordaemon/www/cran 2>&1 | awk '{print "[PullFromInternet] " $0}' | tee >(logger -t SafeHaven) - fi - ELAPSED=$(date -u -d "0 $(date +%s) seconds - $START_TIME seconds" +"%H:%M:%S") - echo "[PullFromInternet] Finished pulling from the internet after $ELAPSED" | tee >(logger -t SafeHaven) - - - path: "/home/mirrordaemon/pull_then_push.sh" - permissions: "0700" - content: | - #! /bin/bash - source ~mirrordaemon/pull_from_internet.sh - source ~mirrordaemon/push_to_internal_mirrors.sh - - - path: "/home/mirrordaemon/update_known_internal_mirrors.sh" - permissions: "0700" - content: | - {{update_known_internal_mirrors.sh}} - -# Add the SHM admin (default) and mirrordaemon users -# lock_passwd: Lock the password to disable password login -users: - - default - - name: mirrordaemon - lock_passwd: true - sudo: false - -# Set locale and timezone -locale: en_GB.UTF-8 -timezone: {{time.timezone.linux}} - -# Set the NTP server -# By default we use Google's NTP servers which are incompatible with other servers due to leap-second smearing -ntp: - enabled: true - pools: - {{#time.ntp.serverAddresses}} - - {{.}} - {{/time.ntp.serverAddresses}} - -runcmd: - # Suppress apt prompts and warning messages - - DEBIAN_FRONTEND=noninteractive - - export DEBIAN_FRONTEND - - # Clean up installation - - echo ">=== Cleaning up apt-get packages... ===<" - - apt update - - apt-get -y autoremove - - apt-get clean - - apt --fix-broken install - - # Ensure that auditd is running and enabled at startup - - echo ">=== Enabling auditd services... ===<" - - systemctl start auditd - - systemctl enable auditd - - sleep 20 - - systemctl status auditd - - # Configure ClamAV - - echo ">=== Configure ClamAV... ===<" - # Allow unlimited recursion when scanning - - sed -i 's/^MaxDirectoryRecursion .*/MaxDirectoryRecursion 0/' /etc/clamav/clamd.conf - # Enable ClamAV daemon - - systemctl enable clamav-daemon - # Enable ClamAV scan on access - - systemctl enable clamav-clamonacc - # Enable ClamAV daily scan - - systemctl enable clamav-clamdscan.timer - # Disable ClamAV database update on boot - - systemctl stop clamav-freshclam - - systemctl disable clamav-freshclam - - # Set up and partition data disk - - echo ">=== Setting up local disk... ===<" - - mkdir -p /datadrive/mirrordaemon/www/cran - - ls -alh /datadrive - - # Fix permissions so that mirrordaemon owns its files - - mkdir -p /home/mirrordaemon/.ssh - - chown -R mirrordaemon:mirrordaemon /datadrive/mirrordaemon - - chown -R mirrordaemon:mirrordaemon ~mirrordaemon - - ls -alh ~mirrordaemon - - # Generate SSH keys for connecting to the internal mirror - - echo ">=== Generating SSH keys for connecting to the internal mirror... ===<" - - cd /home/mirrordaemon/.ssh - - sudo -u mirrordaemon ssh-keygen -t rsa -b 2048 -N '' -f id_rsa - - ls -alh ~mirrordaemon/.ssh - - # Print out some diagnostic information - - echo ">=== This server is currently aware of internal mirrors at the following locations... 
===<" - - cat /home/mirrordaemon/internal_mirror_ip_addresses.txt - - -# Shutdown so that we can tell when the job has finished by polling the VM state -power_state: - mode: poweroff - message: "Shutting down as a signal that setup is finished" - timeout: 30 - condition: true diff --git a/deployment/safe_haven_management_environment/cloud_init/cloud-init-repository-mirror-external-pypi.mustache.yaml b/deployment/safe_haven_management_environment/cloud_init/cloud-init-repository-mirror-external-pypi.mustache.yaml deleted file mode 100644 index 7c60dd4faf..0000000000 --- a/deployment/safe_haven_management_environment/cloud_init/cloud-init-repository-mirror-external-pypi.mustache.yaml +++ /dev/null @@ -1,344 +0,0 @@ -#cloud-config - -# We know that exactly one data disk will be attached to this VM and we start numbering from lun0 -disk_setup: - /dev/disk/azure/scsi1/lun0: - table_type: gpt - layout: true - overwrite: true - -fs_setup: - - device: /dev/disk/azure/scsi1/lun0 - partition: auto - filesystem: ext4 - -mounts: - - [/dev/disk/azure/scsi1/lun0-part1, /datadrive, ext4, "defaults,nofail"] - -# Configure apt repositories -apt: - preserve_sources_list: true - -# List of packages to install with apt-get -packages: - - auditd - - build-essential - - clamav - - clamav-base - - clamav-daemon - - clamav-freshclam - - clamav-unofficial-sigs - - openssh-server - - python3-dev - - python3-pip - - python3-venv - - rsync -package_update: true -package_upgrade: true - -# Initialise an empty file `internal_mirror_ip_addresses.txt` -# When internal mirrors are deployed, they add their IP address to this file -# Whenever `push_to_internal_mirrors.sh` is run, it will try to rsync to all of the IP addresses in the file -# NB. Since write_files runs before user creation, only pre-existing users can be assigned as file owners -write_files: - - path: "/etc/apt/apt.conf.d/00proxy" - permissions: "0444" - content: | - Acquire::http::Proxy "http://{{monitoring.updateServers.linux.ip}}:8000"; - - - path: "/etc/audit/rules.d/audit.rules" - permissions: "0400" - content: | - {{audit.rules}} - - - path: "/etc/bandersnatch.conf" - permissions: "0644" - content: | - [mirror] - ; The directory where the mirror data will be stored. - directory = /datadrive/mirrordaemon/pypi - - ; Save JSON metadata into the web tree - ; URL/pypi/PKG_NAME/json (Symlink) -> URL/json/PKG_NAME - json = true - - ; The PyPI server which will be mirrored. - master = https://pypi.org - - ; The network socket timeout to use for all connections. The default of 10s is - ; deliberately low so the client will fail quickly and move on to the next package - ; instead of having the process hang. For this use-case we want to maximise the - ; number of packages that succeed. - timeout = 60 - - ; Whether to hash package indexes - ; Note that package index directory hashing is incompatible with pip - ; Recommended - the default of false for full pip/pypi compatibility. - hash-index = false - - ; Number of worker threads to use for parallel downloads. - ; Recommendations for worker thread setting - ; - leave the default of 3 to avoid overloading the pypi master - ; - official servers located in data centers could run 10 workers - ; - anything beyond 10 is probably unreasonable and avoided by bandersnatch - workers = 6 - - ; Whether to stop a sync quickly after an error is found or whether to continue - ; syncing but not marking the sync as successful. Value should be "true" or - ; "false". 
- stop-on-error = false - - ; Whether or not files that have been deleted on the master should be deleted here. - delete-packages = true - - [plugins] - enabled = - exclude_platform - - [blocklist] - platforms = - windows - macos - freebsd - - - path: "/etc/clamav/clamd.conf" - permissions: "0644" - append: true - content: | - {{clamd.conf}} - - - path: "/etc/cron.d/clamav-freshclam" - permissions: "0644" - content: | - # Run every day at a fixed time - {{monitoring.updateServers.schedule.daily_definition_updates.minute}} {{monitoring.updateServers.schedule.daily_definition_updates.hour}} * * * freshclam - - - path: "/etc/cron.d/push-to-internal-mirrors" - permissions: "0644" - content: | - # Internal update (rsync) every day at 2am - 0 2 * * * mirrordaemon ~mirrordaemon/push_to_internal_mirrors.sh - - - path: "/etc/cron.d/pull-from-internet" - permissions: "0644" - content: | - # External update from PyPi every 6 hours - 0 */6 * * * mirrordaemon ~mirrordaemon/pull_from_internet.sh - - - path: "/etc/cron.d/pull-then-push" - permissions: "0644" - content: | - # Run both jobs on every reboot - @reboot mirrordaemon ~mirrordaemon/pull_then_push.sh - - - path: "/etc/systemd/system/clamav-clamonacc.service" - permissions: "0644" - content: | - {{clamav-clamonacc.service}} - - - path: "/etc/systemd/system/clamav-clamdscan.service" - permissions: "0644" - content: | - {{clamav-clamdscan.service}} - - - path: "/etc/systemd/system/clamav-clamdscan.timer" - permissions: "0644" - content: | - {{clamav-clamdscan.timer}} - - - path: "/home/mirrordaemon/internal_mirror_ip_addresses.txt" - permissions: "0600" - - - path: "/home/mirrordaemon/package_allowlist.txt" - permissions: "0600" - content: | - {{allowlist-full-python-pypi-tier3.list}} - - - path: "/home/mirrordaemon/update_bandersnatch_config.py" - permissions: "0400" - content: | - #! /usr/bin/env python3 - from collections import defaultdict - - TIER="{{perInstance.tier}}" - config_sections = defaultdict(list) - section = "main" - - # Load packages from allowlist - with open("/home/mirrordaemon/package_allowlist.txt", "r") as f_allowlist: - packages = [p for p in f_allowlist.read().splitlines() if p and not p.startswith("#")] - - # Parse bandersnatch configuration into sections - with open("/etc/bandersnatch.conf", "r") as f_bandersnatch: - for line in f_bandersnatch.read().splitlines(): - if line.startswith("[") and line.endswith("]"): - section = line[1:-1] - if line: - config_sections[section].append(line) - - # Enable/disable allowlist depending on tier - config_sections["plugins"] = [p for p in config_sections["plugins"] if not "allowlist_project" in p] - if TIER == "tier2": - # Tier 2 mirrors should not have a allowlist section - config_sections.pop("allowlist", []) - else: - # If there is already a allowlist section then start by emptying it - config_sections["allowlist"] = ["[allowlist]", "packages ="] - - # Ensure the that allowlist_project plugin is enabled - config_sections["plugins"].append(" allowlist_project") - - # Add any allowed packages to this section - # Note that we need at least one package for package filtering to work. 'six' is minimal and safe. 
- if not packages: - packages = ["six"] - [config_sections["allowlist"].append(" {}".format(package)) for package in packages] - - # Write the output bandersnatch configuration - with open("/etc/bandersnatch.conf", "w") as f_bandersnatch: - for section in config_sections.keys(): - [f_bandersnatch.write(line + "\n") for line in config_sections[section]] - f_bandersnatch.write("\n") - print("Updated /etc/bandersnatch.conf to reflect /home/mirrordaemon/package_allowlist.txt") - - - path: "/home/mirrordaemon/push_to_internal_mirrors.sh" - permissions: "0700" - content: | - #! /bin/bash - # rsync: make the destination look like the source - # -p preserve permissions - # -r recursive - # -t preserve times - # -l follow symlinks - # -v verbose - # --delete delete files present in destination but not source - # --progress show progress - echo "[PushToInternal] Found $(cat /home/mirrordaemon/internal_mirror_ip_addresses.txt | wc -l | xargs) internal mirrors" | tee >(logger -t SafeHaven) - for IP_ADDRESS in $(cat /home/mirrordaemon/internal_mirror_ip_addresses.txt); do - echo "[PushToInternal] Started pushing to the internal mirror at ${IP_ADDRESS}..." | tee >(logger -t SafeHaven) - START_TIME=$(date +%s) - rsync -prtlv --delete --progress /datadrive/mirrordaemon/pypi/* mirrordaemon@${IP_ADDRESS}:/datadrive/mirrordaemon/pypi 2>&1 | awk '{print "[PushToInternal] " $0}' | tee >(logger -t SafeHaven) - ELAPSED=$(date -u -d "0 $(date +%s) seconds - $START_TIME seconds" +"%H:%M:%S") - echo "[PushToInternal] Finished pushing to the internal mirror at $IP_ADDRESS after $ELAPSED" | tee >(logger -t SafeHaven) - done - - - path: "/home/mirrordaemon/pull_from_internet.sh" - permissions: "0700" - content: | - #! /bin/bash - TIER="{{perInstance.tier}}" - echo "[PullFromInternet] Started pulling from the internet..." | tee >(logger -t SafeHaven) - START_TIME=$(date +%s) - if [ "$TIER" != "tier2" ]; then - installed_packages=$(ls -d /datadrive/mirrordaemon/pypi/web/simple/*/ | rev | cut -d'/' -f2 | rev | sed "s/[-_.]\+/-/g" | tr [A-Z] [a-z] | sort) - cat /home/mirrordaemon/package_allowlist.txt | sed "s/[-_.]\+/-/g" | tr [A-Z] [a-z] > /tmp/package_allowlist - packages_to_delete=$(for package in $installed_packages; do if [ "$(grep -w -i $package /tmp/package_allowlist)" = "" ]; then echo $package; fi; done) - /usr/local/bin/bandersnatch delete $packages_to_delete 2>&1 | awk '{print "[PullFromInternet] " $0}' | tee >(logger -t SafeHaven) - rm /tmp/package_allowlist 2> /dev/null - fi - /usr/local/bin/bandersnatch mirror 2>&1 | awk '{print "[PullFromInternet] " $0}' | tee >(logger -t SafeHaven) - ELAPSED=$(date -u -d "0 $(date +%s) seconds - $START_TIME seconds" +"%H:%M:%S") - echo "[PullFromInternet] Finished pulling from the internet after $ELAPSED" | tee >(logger -t SafeHaven) - - - path: "/home/mirrordaemon/pull_then_push.sh" - permissions: "0700" - content: | - #! 
/bin/bash - source ~mirrordaemon/pull_from_internet.sh - source ~mirrordaemon/push_to_internal_mirrors.sh - - - path: "/home/mirrordaemon/update_known_internal_mirrors.sh" - permissions: "0700" - content: | - {{update_known_internal_mirrors.sh}} - -# Add the SHM admin (default) and mirrordaemon users -# lock_passwd: Lock the password to disable password login -users: - - default - - name: mirrordaemon - lock_passwd: true - sudo: false - -# Set locale and timezone -locale: en_GB.UTF-8 -timezone: {{time.timezone.linux}} - -# Set the NTP server -# By default we use Google's NTP servers which are incompatible with other servers due to leap-second smearing -ntp: - enabled: true - pools: - {{#time.ntp.serverAddresses}} - - {{.}} - {{/time.ntp.serverAddresses}} - -runcmd: - # Suppress apt prompts and warning messages - - DEBIAN_FRONTEND=noninteractive - - export DEBIAN_FRONTEND - - # Clean up installation - - echo ">=== Cleaning up apt-get packages... ===<" - - apt update - - apt-get -y autoremove - - apt-get clean - - apt --fix-broken install - - # Ensure that auditd is running and enabled at startup - - echo ">=== Enabling auditd services... ===<" - - systemctl start auditd - - systemctl enable auditd - - sleep 20 - - systemctl status auditd - - # Configure ClamAV - - echo ">=== Configure ClamAV... ===<" - # Allow unlimited recursion when scanning - - sed -i 's/^MaxDirectoryRecursion .*/MaxDirectoryRecursion 0/' /etc/clamav/clamd.conf - # Enable ClamAV daemon - - systemctl enable clamav-daemon - # Enable ClamAV scan on access - - systemctl enable clamav-clamonacc - # Enable ClamAV daily scan - - systemctl enable clamav-clamdscan.timer - # Disable ClamAV database update on boot - - systemctl stop clamav-freshclam - - systemctl disable clamav-freshclam - - # Set up and partition data disk - - echo ">=== Setting up local disk... ===<" - - mkdir -p /datadrive/mirrordaemon/pypi - - ls -alh /datadrive - - # Install bandersnatch with pip - - echo ">=== Installing bandersnatch... ===<" - - pip3 install bandersnatch==4.2.0 packaging==21.3 - - echo "Using bandersnatch from '$(which bandersnatch)'" - - # Initialise allowlist if appropriate - - python3 /home/mirrordaemon/update_bandersnatch_config.py - - # Fix permissions so that mirrordaemon owns its files - - mkdir -p /home/mirrordaemon/.ssh - - chown -R mirrordaemon:mirrordaemon /datadrive/mirrordaemon - - chown -R mirrordaemon:mirrordaemon ~mirrordaemon - - ls -alh ~mirrordaemon - - # Generate SSH keys for connecting to the internal mirror - - echo ">=== Generating SSH keys for connecting to the internal mirror... 
===<" - - cd /home/mirrordaemon/.ssh - - sudo -u mirrordaemon ssh-keygen -t rsa -b 2048 -N '' -f id_rsa - - ls -alh ~mirrordaemon/.ssh - - # Print out some diagnostic information - - echo ">=== This server is currently aware of internal mirrors at the following locations ===<" - - cat /home/mirrordaemon/internal_mirror_ip_addresses.txt - - -# Shutdown so that we can tell when the job has finished by polling the VM state -power_state: - mode: poweroff - message: "Shutting down as a signal that setup is finished" - timeout: 30 - condition: true diff --git a/deployment/safe_haven_management_environment/cloud_init/cloud-init-repository-mirror-internal-cran.mustache.yaml b/deployment/safe_haven_management_environment/cloud_init/cloud-init-repository-mirror-internal-cran.mustache.yaml deleted file mode 100644 index d2fcaf83f3..0000000000 --- a/deployment/safe_haven_management_environment/cloud_init/cloud-init-repository-mirror-internal-cran.mustache.yaml +++ /dev/null @@ -1,175 +0,0 @@ -#cloud-config - -# We know that exactly one data disk will be attached to this VM and we currently start numbering from lun0 -disk_setup: - /dev/disk/azure/scsi1/lun0: - table_type: gpt - layout: true - overwrite: true - -fs_setup: - - device: /dev/disk/azure/scsi1/lun0 - partition: auto - filesystem: ext4 - -mounts: - - [/dev/disk/azure/scsi1/lun0-part1, /datadrive, ext4, "defaults,nofail"] - -# Configure apt repositories -apt: - preserve_sources_list: true - -# List of packages to install with apt-get -packages: - - apache2 - - auditd - - clamav - - clamav-base - - clamav-daemon - - clamav-freshclam - - clamav-unofficial-sigs - - openssh-server - - rsync -package_update: true -package_upgrade: true - -write_files: - - path: "/etc/apt/apt.conf.d/00proxy" - permissions: "0444" - content: | - Acquire::http::Proxy "http://{{monitoring.updateServers.linux.ip}}:8000"; - - - path: "/etc/audit/rules.d/audit.rules" - permissions: "0400" - content: | - {{audit.rules}} - - - path: "/etc/clamav/clamd.conf" - permissions: "0644" - append: true - content: | - {{clamd.conf}} - - - path: "/etc/cron.d/clamav-freshclam" - permissions: "0644" - content: | - # Run every day at a fixed time - {{monitoring.updateServers.schedule.daily_definition_updates.minute}} {{monitoring.updateServers.schedule.daily_definition_updates.hour}} * * * freshclam - - - path: "/etc/systemd/system/clamav-clamonacc.service" - permissions: "0644" - content: | - {{clamav-clamonacc.service}} - - - path: "/etc/systemd/system/clamav-clamdscan.service" - permissions: "0644" - content: | - {{clamav-clamdscan.service}} - - - path: "/etc/systemd/system/clamav-clamdscan.timer" - permissions: "0644" - content: | - {{clamav-clamdscan.timer}} - - - path: /var/local/cran-mirror-apache.conf - permissions: "0644" - content: | - - DocumentRoot "/datadrive/mirrordaemon/www/cran" - - Options Indexes FollowSymLinks Includes - AllowOverride All - Order allow,deny - Allow from all - Require all granted - - - -# Add the SHM admin (default) and mirrordaemon users -# lock_passwd: Lock the password to disable password login -users: - - default - - name: mirrordaemon - lock_passwd: true - sudo: false - ssh_authorized_keys: - - {{{perInstance.externalMirrorPublicKey}}} - -# Set locale and timezone -locale: en_GB.UTF-8 -timezone: {{time.timezone.linux}} - -# Set the NTP server -# By default we use Google's NTP servers which are incompatible with other servers due to leap-second smearing -ntp: - enabled: true - pools: - {{#time.ntp.serverAddresses}} - - {{.}} - 
{{/time.ntp.serverAddresses}} - -runcmd: - # Suppress apt prompts and warning messages - - DEBIAN_FRONTEND=noninteractive - - export DEBIAN_FRONTEND - - # Clean up installation - - echo ">=== Cleaning up apt-get packages... ===<" - - apt update - - apt-get -y autoremove - - apt-get clean - - apt --fix-broken install - - # Ensure that auditd is running and enabled at startup - - echo ">=== Enabling auditd services... ===<" - - systemctl start auditd - - systemctl enable auditd - - sleep 20 - - systemctl status auditd - - # Configure ClamAV - - echo ">=== Configure ClamAV... ===<" - # Allow unlimited recursion when scanning - - sed -i 's/^MaxDirectoryRecursion .*/MaxDirectoryRecursion 0/' /etc/clamav/clamd.conf - # Enable ClamAV daemon - - systemctl enable clamav-daemon - # Enable ClamAV scan on access - - systemctl enable clamav-clamonacc - # Enable ClamAV daily scan - - systemctl enable clamav-clamdscan.timer - # Disable ClamAV database update on boot - - systemctl stop clamav-freshclam - - systemctl disable clamav-freshclam - - # Show authorised SSH keys - - echo ">=== Authorised SSH keys... ===<" - - cat ~mirrordaemon/.ssh/authorized_keys - - # Upgrade installation and clean up - - echo ">=== Upgrade and clean up apt-get packages... ===<" - - apt-get -y upgrade - - apt-get clean - - # Set up disk - - echo ">=== Setting up local disk... ===<" - - mkdir -p /datadrive/mirrordaemon/www/cran - - ls -alh /datadrive - - # Fix permissions so that mirrordaemon owns its files - - chown -R mirrordaemon:mirrordaemon /datadrive/mirrordaemon - - # Set up apache: 'restart' runs it now; 'enable' adds it to the list of services run on boot - - echo ">=== Setting up apache server pointing to local disk... ===<" - - mv /var/local/cran-mirror-apache.conf /etc/apache2/sites-enabled/000-default.conf - - a2enmod rewrite - - systemctl enable apache2 - - systemctl restart apache2 - - systemctl status apache2 - - -# Shutdown so that we can tell when the job has finished by polling the VM state -power_state: - mode: poweroff - message: "Shutting down as a signal that setup is finished" - timeout: 30 - condition: true diff --git a/deployment/safe_haven_management_environment/cloud_init/cloud-init-repository-mirror-internal-pypi.mustache.yaml b/deployment/safe_haven_management_environment/cloud_init/cloud-init-repository-mirror-internal-pypi.mustache.yaml deleted file mode 100644 index fe4359b297..0000000000 --- a/deployment/safe_haven_management_environment/cloud_init/cloud-init-repository-mirror-internal-pypi.mustache.yaml +++ /dev/null @@ -1,279 +0,0 @@ -#cloud-config - -# We know that exactly one data disk will be attached to this VM and we currently start numbering from lun0 -disk_setup: - /dev/disk/azure/scsi1/lun0: - table_type: gpt - layout: true - overwrite: true - -fs_setup: - - device: /dev/disk/azure/scsi1/lun0 - partition: auto - filesystem: ext4 - -mounts: - - [/dev/disk/azure/scsi1/lun0-part1, /datadrive, ext4, "defaults,nofail"] - -# Configure apt repositories -apt: - preserve_sources_list: true - -# List of packages to install with apt-get -packages: - - auditd - - clamav - - clamav-base - - clamav-daemon - - clamav-freshclam - - clamav-unofficial-sigs - - openssh-server - - python3-pip - - python3-venv -package_update: true -package_upgrade: true - -write_files: - - path: "/etc/apt/apt.conf.d/00proxy" - permissions: "0444" - content: | - Acquire::http::Proxy "http://{{monitoring.updateServers.linux.ip}}:8000"; - - - path: "/etc/audit/rules.d/audit.rules" - permissions: "0400" - content: | - 
{{audit.rules}} - - - path: "/etc/clamav/clamd.conf" - permissions: "0644" - append: true - content: | - {{clamd.conf}} - - - path: "/etc/cron.d/clamav-freshclam" - permissions: "0644" - content: | - # Run every day at a fixed time - {{monitoring.updateServers.schedule.daily_definition_updates.minute}} {{monitoring.updateServers.schedule.daily_definition_updates.hour}} * * * freshclam - - - path: "/etc/systemd/system/clamav-clamonacc.service" - permissions: "0644" - content: | - {{clamav-clamonacc.service}} - - - path: "/etc/systemd/system/clamav-clamdscan.service" - permissions: "0644" - content: | - {{clamav-clamdscan.service}} - - - path: "/etc/systemd/system/clamav-clamdscan.timer" - permissions: "0644" - content: | - {{clamav-clamdscan.timer}} - - - path: "/etc/systemd/system/pypiserver.service" - content: | - [Unit] - Description=A minimal PyPI server for use with pip/easy_install. - After=network.target - - [Service] - Type=simple - PIDFile=/var/run/pypiserver.pid - User=mirrordaemon - Group=mirrordaemon - - ExecStart=/usr/local/bin/pypi-server -p 3128 -P . -a . -v --log-file /datadrive/mirrordaemon/pypiserver.log /datadrive/mirrordaemon/pypi/web/packages - ExecStop=/bin/kill -TERM $MAINPID - ExecReload=/bin/kill -HUP $MAINPID - Restart=always - - WorkingDirectory=/datadrive/mirrordaemon/pypi/ - - TimeoutStartSec=3 - RestartSec=5 - - [Install] - WantedBy=multi-user.target - - - path: "/var/local/pypiserver.app.py.patch" - content: | - --- _app.py 2019-04-03 19:51:21.000000000 +0100 - +++ _app.py 2019-04-03 20:27:05.000000000 +0100 - @@ -84,7 +84,2 @@ - - - try: - - numpkgs = len(list(packages())) - - except: - - numpkgs = 0 - - - # Ensure template() does not consider `msg` as filename! - @@ -94,3 +89,3 @@ - VERSION=__version__, - - NUMPKGS=numpkgs, - + NUMPKGS="mirrored", - PACKAGES=urljoin(fp, "packages/"), - @@ -108,2 +103,3 @@ - def doc_upload(): - + raise HTTPError(405, "documentation upload not allowed") - try: - @@ -121,2 +117,3 @@ - def remove_pkg(): - + raise HTTPError(405, "package removal not allowed") - name = request.forms.get("name") - @@ -140,2 +137,3 @@ - def file_upload(): - + raise HTTPError(405, "file upload not allowed") - ufiles = Upload._make( - @@ -215,2 +213,23 @@ - ordering = 0 - + # Override package search (which times out) with a lookup from the simple static package index - + with open("/datadrive/mirrordaemon/pypi/web/simple/index.html", "r") as f_package_index: - + for line in f_package_index.readlines(): - + if "")[1].split("<")[0] - + if value in package_name: - + versions = ["exists"] - + with open("/datadrive/mirrordaemon/pypi/web/simple/{}/index.html".format(package_name), "r") as f_package_details: - + for line in f_package_details.readlines(): - + if ".tar.gz" in line: - + try: - + versions.append(line.split(">")[1].split("-")[-1].split(".tar.gz")[0]) - + except IndexError: - + pass - + d = {"_pypi_ordering": 0, "version": versions[-1], - + "name": package_name, "summary": "{} is available".format(package_name)} - + response.append(d) - + ordering += 1 - + call_string = xmlrpclib.dumps((response,), 'search', methodresponse=True) - + return call_string - + # End of patch - for p in packages(): - @@ -231,2 +250,6 @@ - def simpleindex(): - + # Overriding simple index pages with static content - + with open("/datadrive/mirrordaemon/pypi/web/simple/index.html", "r") as f_index: - + return f_index.read() - + # End of patch - links = sorted(core.get_prefixes(packages())) - @@ -255,3 +278,6 @@ - return redirect('/simple/{0}/'.format(normalized), 301) - - 
- + # Overriding simple index pages with static content - + with open("/datadrive/mirrordaemon/pypi/web/simple/{}/index.html".format(prefix), "r") as f_index: - + return f_index.read() - + # End of patch - files = sorted(core.find_packages(packages(), prefix=prefix), - @@ -286,2 +312,5 @@ - def list_packages(): - + # Redirecting to the simple endpoint - + return redirect(request.fullpath.replace("packages", "simple"), 302) - + # End of patch - fp = request.fullpath - @@ -312,2 +341,10 @@ - def server_static(filename): - + # Overriding package root check since all packages are in one directory - + root = "/datadrive/mirrordaemon/pypi/web/packages" - + response = static_file(filename, root=root, mimetype=mimetypes.guess_type(filename)[0]) - + if config.cache_control: - + response.set_header( - + "Cache-Control", "public, max-age=%s" % config.cache_control) - + return response - + # End of patch - entries = core.find_packages(packages()) - -# Add the SHM admin (default) and mirrordaemon users -# lock_passwd: Lock the password to disable password login -users: - - default - - name: mirrordaemon - lock_passwd: true - sudo: false - ssh_authorized_keys: - - {{{perInstance.externalMirrorPublicKey}}} - -# Set locale and timezone -locale: en_GB.UTF-8 -timezone: {{time.timezone.linux}} - -# Set the NTP server -# By default we use Google's NTP servers which are incompatible with other servers due to leap-second smearing -ntp: - enabled: true - pools: - {{#time.ntp.serverAddresses}} - - {{.}} - {{/time.ntp.serverAddresses}} - -runcmd: - # Suppress apt prompts and warning messages - - DEBIAN_FRONTEND=noninteractive - - export DEBIAN_FRONTEND - - # Clean up installation - - echo ">=== Cleaning up apt-get packages... ===<" - - apt update - - apt-get -y autoremove - - apt-get clean - - apt --fix-broken install - - # Ensure that auditd is running and enabled at startup - - echo ">=== Enabling auditd services... ===<" - - systemctl start auditd - - systemctl enable auditd - - sleep 20 - - systemctl status auditd - - # Configure ClamAV - - echo ">=== Configure ClamAV... ===<" - # Allow unlimited recursion when scanning - - sed -i 's/^MaxDirectoryRecursion .*/MaxDirectoryRecursion 0/' /etc/clamav/clamd.conf - # Enable ClamAV daemon - - systemctl enable clamav-daemon - # Enable ClamAV scan on access - - systemctl enable clamav-clamonacc - # Enable ClamAV daily scan - - systemctl enable clamav-clamdscan.timer - # Disable ClamAV database update on boot - - systemctl stop clamav-freshclam - - systemctl disable clamav-freshclam - - # Show authorised SSH keys - - echo ">=== Authorised SSH keys... ===<" - - cat ~mirrordaemon/.ssh/authorized_keys - - # Upgrade installation and clean up - - echo ">=== Upgrade and clean up apt-get packages... ===<" - - apt-get -y upgrade - - apt-get clean - - # Set up disk, fixing permissions so that mirrordaemon owns its files - - echo ">=== Setting up local disk... ===<" - - mkdir -p /datadrive/mirrordaemon/pypi/web/packages - - chown -R mirrordaemon:mirrordaemon /datadrive/mirrordaemon - - ls -alh /datadrive - - # Install pypiserver with pip - - echo ">=== Installing pypiserver... 
===<" - - pip3 install pypiserver==1.2.7 - - # Patch pypiserver so that it uses the static simple index files - - echo ">=== Patching pypiserver to use static index files ===<" - - cd /usr/local/lib/python3*/dist-packages/pypiserver - - patch < /var/local/pypiserver.app.py.patch - - cd / - - echo "Using pypi-server from '$(which pypi-server)'" - - # Set up pypiserver: 'start' runs it now; 'enable' adds it to the list of services run on boot - - echo ">=== Setting up pypiserver pointing to local disk... ===<" - - systemctl enable pypiserver - - systemctl start pypiserver - - systemctl status pypiserver - - -# Shutdown so that we can tell when the job has finished by polling the VM state -power_state: - mode: poweroff - message: "Shutting down as a signal that setup is finished" - timeout: 30 - condition: true diff --git a/deployment/safe_haven_management_environment/cloud_init/cloud-init-repository-proxy.mustache.yaml b/deployment/safe_haven_management_environment/cloud_init/cloud-init-repository-proxy.mustache.yaml deleted file mode 100644 index f6c7377302..0000000000 --- a/deployment/safe_haven_management_environment/cloud_init/cloud-init-repository-proxy.mustache.yaml +++ /dev/null @@ -1,187 +0,0 @@ -#cloud-config - -write_files: - - path: "/etc/apt/apt.conf.d/00proxy" - permissions: "0444" - content: | - Acquire::http::Proxy "http://{{monitoring.updateServers.linux.ip}}:8000"; - - - path: "/etc/audit/rules.d/audit.rules" - permissions: "0400" - content: | - {{audit.rules}} - - - path: "/etc/clamav/clamd.conf" - permissions: "0644" - append: true - content: | - {{clamd.conf}} - - - path: "/etc/cron.d/clamav-freshclam" - permissions: "0644" - content: | - # Run every day at a fixed time - {{monitoring.updateServers.schedule.daily_definition_updates.minute}} {{monitoring.updateServers.schedule.daily_definition_updates.hour}} * * * freshclam - - - path: "/etc/nexus/docker-compose.yaml" - permissions: "0400" - content: | - {{nexus_docker_compose.mustache.yaml}} - - - path: "/etc/nexus/allowlist-pypi" - permissions: "0644" - content: | - {{allowlist-full-python-pypi-tier3.list}} - - - path: "/etc/nexus/allowlist-cran" - permissions: "0644" - content: | - {{allowlist-full-r-cran-tier3.list}} - - - path: "/etc/nexus/nginx.conf" - permissions: "0400" - content: | - {{nexus_nginx.mustache.conf}} - - - path: "/etc/systemd/system/clamav-clamonacc.service" - permissions: "0644" - content: | - {{clamav-clamonacc.service}} - - - path: "/etc/systemd/system/clamav-clamdscan.service" - permissions: "0644" - content: | - {{clamav-clamdscan.service}} - - - path: "/etc/systemd/system/clamav-clamdscan.timer" - permissions: "0644" - content: | - {{clamav-clamdscan.timer}} - - - path: "/usr/local/bin/configure-nexus" - permissions: "0755" - content: | - {{configure_nexus.py}} - - - path: "/usr/local/update-nexus-allowlists" - permissions: "0700" - content: | - /usr/local/bin/configure-nexus --admin-password {{perInstance.nexusAdminPassword}} update-allowlists --tier {{perInstance.tier}} --pypi-package-file /etc/nexus/allowlist-pypi --cran-package-file /etc/nexus/allowlist-cran >> /var/log/configure_nexus.log 2>&1 - - - path: "/opt/configuration/docker_pat.txt" - permissions: "0400" - content: {{dockerPassword}} - -# Set locale and timezone -locale: en_GB.UTF-8 -timezone: {{time.timezone.linux}} - -# Set the NTP server -# By default we use Google's NTP servers which are incompatible with other servers due to leap-second smearing -ntp: - enabled: true - pools: - {{#time.ntp.serverAddresses}} - - {{.}} - 
{{/time.ntp.serverAddresses}} - -# Configure apt repositories -apt: - preserve_sources_list: true - -# Install necessary apt packages -packages: - - auditd - - clamav - - clamav-base - - clamav-daemon - - clamav-freshclam - - clamav-unofficial-sigs - - docker.io - - docker-compose-v2 - - python3-requests -package_update: true -package_upgrade: true - -# Add the SHM admin (default) and nexusdaemon users -users: - - default - - name: nexusdaemon - lock_passwd: true # Lock the password to disable password login - sudo: false # This user will not have sudo privileges - -runcmd: - # Suppress apt prompts and warning messages - - DEBIAN_FRONTEND=noninteractive - - export DEBIAN_FRONTEND - - # Clean up installation - - echo ">=== Cleaning up apt-get packages... ===<" - - apt update - - apt-get -y autoremove - - apt-get clean - - apt --fix-broken install - - # Ensure that auditd is running and enabled at startup - - echo ">=== Enabling auditd services... ===<" - - systemctl start auditd - - systemctl enable auditd - - sleep 20 - - systemctl status auditd - - # Configure ClamAV - - echo ">=== Configure ClamAV... ===<" - # Allow unlimited recursion when scanning - - sed -i 's/^MaxDirectoryRecursion .*/MaxDirectoryRecursion 0/' /etc/clamav/clamd.conf - # Enable ClamAV daemon - - systemctl enable clamav-daemon - # Enable ClamAV scan on access - - systemctl enable clamav-clamonacc - # Enable ClamAV daily scan - - systemctl enable clamav-clamdscan.timer - # Disable ClamAV database update on boot - - systemctl stop clamav-freshclam - - systemctl disable clamav-freshclam - - # Ensure that Docker is running and enabled at startup - - echo ">=== Configuring Docker... ===<" - - systemctl enable docker - - systemctl start docker - - sleep 1m - - systemctl status docker - - docker --version - - docker compose version - - # Create directory for Nexus data that is owned by the correct user inside the Docker container - - echo ">=== Creating Nexus data directory... ===<" - - mkdir /nexus-data - - chown -R 200:200 /nexus-data - - # Set up the guacamoledaemon user - - echo ">=== Configuring nexusdaemon user... ===<" - - groupadd docker 2> /dev/null - - usermod -aG docker nexusdaemon - - newgrp docker - - chown -R nexusdaemon:nexusdaemon /etc/nexus - - ls -alh /etc/nexus - - # Set up the Nexus container - - echo ">=== Creating Nexus container... ===<" - - chown nexusdaemon:nexusdaemon /opt/configuration/docker_pat.txt # Ensure that the file is owned by the nexusdaemon user - - su nexusdaemon -c "cat /opt/configuration/docker_pat.txt | docker login --username '{{dockerAccount}}' --password-stdin - && docker compose -f /etc/nexus/docker-compose.yaml up -d" - - # Give Nexus some time to initialise - - echo ">=== Waiting for Nexus to initialise (5 minutes)... ===<" - - sleep 5m - - # Configure Nexus - - echo ">=== Configuring Nexus... 
===<" - - configure-nexus --admin-password {{perInstance.nexusAdminPassword}} change-initial-password --path /nexus-data - - configure-nexus --admin-password {{perInstance.nexusAdminPassword}} initial-configuration --tier {{perInstance.tier}} --pypi-package-file /etc/nexus/allowlist-pypi --cran-package-file /etc/nexus/allowlist-cran - -power_state: - mode: poweroff - message: "Shutting down as a signal that setup is finished" - timeout: 30 - condition: true diff --git a/deployment/safe_haven_management_environment/cloud_init/cloud-init-update-server-linux.mustache.yaml b/deployment/safe_haven_management_environment/cloud_init/cloud-init-update-server-linux.mustache.yaml deleted file mode 100644 index cac0cb6e80..0000000000 --- a/deployment/safe_haven_management_environment/cloud_init/cloud-init-update-server-linux.mustache.yaml +++ /dev/null @@ -1,115 +0,0 @@ -#cloud-config - -write_files: - - path: "/etc/audit/rules.d/audit.rules" - permissions: "0400" - content: | - {{audit.rules}} - - - path: "/etc/clamav/clamd.conf" - permissions: "0644" - append: true - content: | - {{clamd.conf}} - - - path: "/etc/cron.d/clamav-freshclam" - permissions: "0644" - content: | - # Run every day at a fixed time - {{monitoring.updateServers.schedule.daily_definition_updates.minute}} {{monitoring.updateServers.schedule.daily_definition_updates.hour}} * * * freshclam - - - path: "/etc/systemd/system/clamav-clamonacc.service" - permissions: "0644" - content: | - {{clamav-clamonacc.service}} - - - path: "/etc/systemd/system/clamav-clamdscan.service" - permissions: "0644" - content: | - {{clamav-clamdscan.service}} - - - path: "/etc/systemd/system/clamav-clamdscan.timer" - permissions: "0644" - content: | - {{clamav-clamdscan.timer}} - - - path: "/etc/squid-deb-proxy/mirror-dstdomain.acl.d/20-data-safe-haven" - content: | - # Additional mirror domains that are allowed by this cache - apt.postgresql.org - d20rj4el6vkp4c.cloudfront.net - dbeaver.io - developer.download.nvidia.com - packages.gitlab.com - packages.microsoft.com - qgis.org - ubuntu.qgis.org - -# Set locale and timezone -locale: en_GB.UTF-8 -timezone: {{time.timezone.linux}} - -# Set the NTP server -# By default we use Google's NTP servers which are incompatible with other servers due to leap-second smearing -ntp: - enabled: true - pools: - {{#time.ntp.serverAddresses}} - - {{.}} - {{/time.ntp.serverAddresses}} - -# Install necessary apt packages -packages: - - auditd - - clamav - - clamav-base - - clamav-daemon - - clamav-freshclam - - clamav-unofficial-sigs - - squid-deb-proxy -package_update: true -package_upgrade: true - -runcmd: - # Suppress apt prompts and warning messages - - DEBIAN_FRONTEND=noninteractive - - export DEBIAN_FRONTEND - - # Clean up installation - - echo ">=== Cleaning up apt-get packages... ===<" - - apt update - - apt-get -y autoremove - - apt-get clean - - apt --fix-broken install - - # Ensure that auditd is running and enabled at startup - - echo ">=== Enabling auditd services... ===<" - - systemctl start auditd - - systemctl enable auditd - - sleep 20 - - systemctl status auditd - - # Configure ClamAV - - echo ">=== Configure ClamAV... 
===<" - # Allow unlimited recursion when scanning - - sed -i 's/^MaxDirectoryRecursion .*/MaxDirectoryRecursion 0/' /etc/clamav/clamd.conf - # Enable ClamAV daemon - - systemctl enable clamav-daemon - # Enable ClamAV scan on access - - systemctl enable clamav-clamonacc - # Enable ClamAV daily scan - - systemctl enable clamav-clamdscan.timer - # Disable ClamAV database update on boot - - systemctl stop clamav-freshclam - - systemctl disable clamav-freshclam - - # Remove the unnecessary squid service and prevent it from running - - service squid stop - - systemctl disable -f squid - - update-rc.d -f squid remove - -power_state: - mode: poweroff - message: "Shutting down as a signal that setup is finished" - timeout: 30 - condition: true diff --git a/deployment/safe_haven_management_environment/cloud_init/resources/configure_nexus.py b/deployment/safe_haven_management_environment/cloud_init/resources/configure_nexus.py deleted file mode 100755 index f460162799..0000000000 --- a/deployment/safe_haven_management_environment/cloud_init/resources/configure_nexus.py +++ /dev/null @@ -1,774 +0,0 @@ -#! /usr/bin/env python3 -from argparse import ArgumentParser -from pathlib import Path -import re -import requests - -_NEXUS_REPOSITORIES = { - "pypi_proxy": dict( - repo_type="pypi", - name="pypi-proxy", - remote_url="https://pypi.org/" - ), - "cran_proxy": dict( - repo_type="r", - name="cran-proxy", - remote_url="https://cran.r-project.org/" - ) -} - -_ROLE_NAME = "safe haven user" - - -class NexusAPI: - """Interface to the Nexus REST API""" - - def __init__(self, *, password, username="admin", - nexus_path="http://localhost", nexus_port="80"): - self.nexus_api_root = f"{nexus_path}:{nexus_port}/service/rest" - self.username = username - self.password = password - - @property - def auth(self): - return requests.auth.HTTPBasicAuth(self.username, self.password) - - def change_admin_password(self, new_password): - """ - Change the password of the 'admin' account - - Args: - new_password: New password to be set - """ - print(f"Old password: {self.password}") - print(f"New password: {new_password}") - response = requests.put( - f"{self.nexus_api_root}/v1/security/users/admin/change-password", - auth=self.auth, - headers={"content-type": "text/plain"}, - data=new_password, - ) - if response.status_code == 204: - print("Changed admin password") - self.password = new_password - else: - print("Changing password failed") - print(response.content) - - def delete_all_repositories(self): - """Delete all existing repositories""" - response = requests.get( - f"{self.nexus_api_root}/v1/repositories", auth=self.auth - ) - repositories = response.json() - - for repo in repositories: - name = repo["name"] - print(f"Deleting repository: {name}") - response = requests.delete( - f"{self.nexus_api_root}/v1/repositories/{name}", auth=self.auth - ) - code = response.status_code - if code == 204: - print("Repository successfully deleted") - else: - print(f"Repository deletion failed.\nStatus code:{code}") - print(response.content) - - def create_proxy_repository(self, repo_type, name, remote_url): - """ - Create a proxy repository. 
Currently supports PyPI and R formats - - Args: - repo_type: Type of repository, one of 'pypi' or 'r' - name: Name of the repository - remote_url: Path of the repository to proxy - """ - assert repo_type in ["pypi", "r"] - - payload = { - "name": "", - "online": True, - "storage": { - "blobStoreName": "default", - "strictContentTypeValidation": True, - }, - "proxy": {"remoteUrl": "", "contentMaxAge": 1440, "metadataMaxAge": 1440}, - "negativeCache": {"enabled": True, "timeToLive": 1440}, - "httpClient": { - "blocked": False, - "autoBlock": True, - }, - } - payload["name"] = name - payload["proxy"]["remoteUrl"] = remote_url - - print(f"Creating {repo_type} repository: {name}") - response = requests.post( - f"{self.nexus_api_root}/v1/repositories/{repo_type}/proxy", - auth=self.auth, - json=payload, - ) - code = response.status_code - if code == 201: - print(f"{repo_type} proxy successfully created") - else: - print(f"{repo_type} proxy creation failed.\nStatus code: {code}") - print(response.content) - - def delete_all_content_selectors(self): - """Delete all existing content selectors""" - response = requests.get( - f"{self.nexus_api_root}/v1/security/content-selectors", auth=self.auth - ) - content_selectors = response.json() - - for content_selector in content_selectors: - name = content_selector["name"] - print(f"Deleting content selector: {name}") - response = requests.delete( - f"{self.nexus_api_root}/v1/security/content-selectors/{name}", - auth=self.auth, - ) - code = response.status_code - if code == 204: - print("Content selector successfully deleted") - else: - print(f"Content selector deletion failed.\nStatus code:{code}") - print(response.content) - - def create_content_selector(self, name, description, expression): - """ - Create a new content selector - - Args: - name: Name of the content selector - description: Description of the content selector - expression: CSEL query (https://help.sonatype.com/repomanager3/nexus-repository-administration/access-control/content-selectors) - to identify content - """ - payload = { - "name": f"{name}", - "description": f"{description}", - "expression": f"{expression}", - } - - print(f"Creating content selector: {name}") - response = requests.post( - f"{self.nexus_api_root}/v1/security/content-selectors", - auth=self.auth, - json=payload, - ) - code = response.status_code - if code == 204: - print("content selector successfully created") - elif code == 500: - print("content selector already exists") - else: - print(f"content selector creation failed.\nStatus code: {code}") - print(response.content) - - def delete_all_content_selector_privileges(self): - """Delete all existing content selector privileges""" - response = requests.get( - f"{self.nexus_api_root}/v1/security/privileges", auth=self.auth - ) - privileges = response.json() - - for privilege in privileges: - if privilege["type"] != "repository-content-selector": - continue - - name = privilege["name"] - print(f"Deleting content selector privilege: {name}") - response = requests.delete( - f"{self.nexus_api_root}/v1/security/privileges/{name}", auth=self.auth - ) - code = response.status_code - if code == 204: - print(f"Content selector privilege: {name} successfully deleted") - else: - print( - "Content selector privilege deletion failed." 
f"Status code:{code}" - ) - print(response.content) - - def create_content_selector_privilege(self, name, description, repo_type, - repo, content_selector): - """ - Create a new content selector privilege - - Args: - name: Name of the content selector privilege - description: Description of the content selector privilege - repo_type: Type of repository this privilege applies to - repo: Name of the repository this privilege applies to - content_selector: Name of the content selector applied to this - privilege - """ - payload = { - "name": f"{name}", - "description": f"{description}", - "actions": ["READ"], - "format": f"{repo_type}", - "repository": f"{repo}", - "contentSelector": f"{content_selector}", - } - - print(f"Creating content selector privilege: {name}") - response = requests.post( - ( - f"{self.nexus_api_root}/v1/security/privileges" - "/repository-content-selector" - ), - auth=self.auth, - json=payload, - ) - code = response.status_code - if code == 201: - print(f"content selector privilege {name} successfully created") - elif code == 400: - print(f"content selector privilege {name} already exists") - else: - print( - f"content selector privilege {name} creation failed.\n" - f"Status code: {code}" - ) - print(response.content) - - def delete_all_custom_roles(self): - """Delete all roles except for the default 'nx-admin' and 'nxanonymous'""" - response = requests.get( - f"{self.nexus_api_root}/v1/security/roles", auth=self.auth - ) - roles = response.json() - - for role in roles: - name = role["name"] - if name in ["nx-admin", "nx-anonymous"]: - continue - - print(f"Deleting role: {name}") - response = requests.delete( - f"{self.nexus_api_root}/v1/security/roles/{name}", auth=self.auth - ) - code = response.status_code - if code == 204: - print("Role successfully deleted") - else: - print(f"Role deletion failed.\nStatus code:{code}") - print(response.content) - - def create_role(self, name, description, privileges, roles=[]): - """ - Create a new role - - Args: - name: Name of the role (also becomes the role id) - description: Description of the role - privileges: Privileges to be granted to the role - roles: Roles to be granted to the role - """ - payload = { - "id": f"{name}", - "name": f"{name}", - "description": f"{description}", - "privileges": privileges, - "roles": roles, - } - - print(f"Creating role: {name}") - response = requests.post( - (f"{self.nexus_api_root}/v1/security/roles"), auth=self.auth, json=payload - ) - code = response.status_code - if code == 200: - print(f"role {name} successfully created") - elif code == 400: - print(f"role {name} already exists") - else: - print(f"role {name} creation failed.\nStatus code: {code}") - print(response.content) - - def update_role(self, name, description, privileges, roles=[]): - """ - Update an existing role - - Args: - name: Name of the role (also assumed to be the role id) - description: Description of the role - privileges: Privileges to be granted to the role (overwrites all - existing privileges) - roles: Roles to be granted to the role (overwrites all existing - roles) - """ - payload = { - "id": f"{name}", - "name": f"{name}", - "description": f"{description}", - "privileges": privileges, - "roles": roles, - } - - print(f"updating role: {name}") - response = requests.put( - (f"{self.nexus_api_root}/v1/security/roles/{name}"), auth=self.auth, json=payload - ) - code = response.status_code - if code == 204: - print(f"role {name} successfully created") - elif code == 404: - print(f"role {name} does not exist") - 
else: - print(f"role {name} update failed.\nStatus code: {code}") - print(response.content) - - def enable_anonymous_access(self): - """Enable access from anonymous users (where no credentials are supplied)""" - response = requests.put( - f"{self.nexus_api_root}/v1/security/anonymous", - auth=self.auth, - json={ - "enabled": True, - "userId": "anonymous", - "realName": "Local Authorizing Realm", - }, - ) - code = response.status_code - if code == 200: - print("Anonymous access enabled") - else: - print(f"Enabling anonymous access failed.\nStatus code: {code}") - print(response.content) - - def update_anonymous_user_roles(self, roles): - """ - Update the roles assigned to the 'anonymous' user - - Args: - roles: Roles to be assigned to the anonymous user, overwrites all - existing roles - """ - # Get existing user data JSON - response = requests.get( - f"{self.nexus_api_root}/v1/security/users", auth=self.auth - ) - users = response.json() - for user in users: - if user["userId"] == "anonymous": - anonymous_user = user - break - - # Change roles - anonymous_user["roles"] = roles - - # Push changes to Nexus - response = requests.put( - f"{self.nexus_api_root}/v1/security/users/{anonymous_user['userId']}", - auth=self.auth, - json=anonymous_user, - ) - code = response.status_code - if code == 204: - print(f"User {anonymous_user['userId']} roles updated") - else: - print( - f"User {anonymous_user['userId']} role update failed.\n" - f"Status code: {code}" - ) - print(response.content) - - -def main(): - parser = ArgumentParser(description="Configure Nexus3") - parser.add_argument( - "--admin-password", - type=str, - required=True, - help="Password for the Nexus 'admin' account", - ) - - # Group of arguments for tiers and package files - tier_parser = ArgumentParser(add_help=False) - tier_parser.add_argument( - "--tier", - type=int, - required=True, - choices=[2, 3], - help="Data security tier of the repository", - ) - tier_parser.add_argument( - "--pypi-package-file", - type=Path, - help="Path of the file of allowed PyPI packages, ignored when TIER!=3" - ) - tier_parser.add_argument( - "--cran-package-file", - type=Path, - help="Path of the file of allowed CRAN packages, ignored when TIER!=3" - ) - - subparsers = parser.add_subparsers( - title="subcommands", - required=True - ) - - # sub-command for changing initial password - parser_password = subparsers.add_parser( - "change-initial-password", - help="Change the initial admin password" - ) - parser_password.add_argument( - "--path", - type=Path, - default=Path("./nexus-data"), - help="Path of the nexus-data directory [./nexus-data]" - ) - parser_password.set_defaults(func=change_initial_password) - - # sub-command for initial configuration - parser_configure = subparsers.add_parser( - "initial-configuration", - help="Configure the Nexus repository", - parents=[tier_parser] - ) - parser_configure.set_defaults(func=initial_configuration) - - # sub-command for updating package allow lists - parser_update = subparsers.add_parser( - "update-allowlists", - help="Update the Nexus package allowlists", - parents=[tier_parser] - ) - parser_update.set_defaults(func=update_allow_lists) - - args = parser.parse_args() - - args.func(args) - - -def change_initial_password(args): - """ - Change the initial password created during Nexus deployment - - The initial password is stored in a file called 'admin.password' which is - automatically removed when the password is first changed. 
- - Args: - args: Command line arguments - - raises: - Exception: If 'admin.password' is not found - """ - password_file_path = Path(f"{args.path}/admin.password") - - try: - with password_file_path.open() as password_file: - initial_password = password_file.read() - except FileNotFoundError: - raise Exception( - "Initial password appears to have been already changed" - ) - - nexus_api = NexusAPI(password=initial_password) - - nexus_api.change_admin_password(args.admin_password) - - -def initial_configuration(args): - """ - Fully configure Nexus in an idempotent manner. - - This includes: - - Deleting all respositories - - Creating CRAN and PyPI proxies - - Deleting all content selectors and content selector privileges - - Creating content selectors and content selector privileges according - to the tier and allowlists - - Deleting all non-default roles - - Creating a role with the previously defined content selector - privileges - - Giving anonymous users ONLY the previously defined role - - Enabling anonymous access - - Args: - args: Command line arguments - """ - check_package_files(args) - - nexus_api = NexusAPI(password=args.admin_password) - - # Ensure only desired repositories exist - recreate_repositories(nexus_api) - - pypi_allowlist, cran_allowlist = get_allowlists(args.pypi_package_file, - args.cran_package_file) - privileges = recreate_privileges(args.tier, nexus_api, pypi_allowlist, - cran_allowlist) - - # Delete non-default roles - nexus_api.delete_all_custom_roles() - - # Create a role for safe haven users - nexus_api.create_role( - name=_ROLE_NAME, - description="allows access to selected packages", - privileges=privileges - ) - - # Update anonymous users roles - nexus_api.update_anonymous_user_roles([_ROLE_NAME]) - - # Enable anonymous access - nexus_api.enable_anonymous_access() - - -def update_allow_lists(args): - """ - Update which packages anonymous users may access AFTER the initial, full - configuration of the Nexus server. - - The following steps will occur: - - Deleting all content selectors and content selector privileges - - Creating content selectors and content selector privileges according - to the tier and allowlists - - Updating the anonymous accounds only role role with the previously - defined content selector privileges - - Args: - args: Command line arguments - """ - check_package_files(args) - - nexus_api = NexusAPI(password=args.admin_password) - - pypi_allowlist, cran_allowlist = get_allowlists(args.pypi_package_file, - args.cran_package_file) - privileges = recreate_privileges(args.tier, nexus_api, pypi_allowlist, - cran_allowlist) - - # Update role for safe haven users - nexus_api.update_role( - name=_ROLE_NAME, - description="allows access to selected packages", - privileges=privileges - ) - - -def check_package_files(args): - """ - Ensure that the allowlist files exist - - Args: - args: Command line arguments - - raise: - Exception: if any declared allowlist file does not exist - """ - for package_file in [args.pypi_package_file, args.cran_package_file]: - if package_file and not package_file.is_file(): - raise Exception( - f"Package allowlist file {package_file} does not exist" - ) - - -def get_allowlists(pypi_package_file, cran_package_file): - """ - Create allowlists for PyPI and CRAN packages - - Args: - pypi_package_file: Path to the PyPI allowlist file or None - cran_package_file: Path to the CRAN allowlist file or None - - Returns: - A tuple of the PyPI and CRAN allowlists (in that order). 
The lists are - [] if the corresponding package file argument was None - """ - pypi_allowlist = [] - cran_allowlist = [] - - if pypi_package_file: - pypi_allowlist = get_allowlist(pypi_package_file, False) - - if cran_package_file: - cran_allowlist = get_allowlist(cran_package_file, True) - - return (pypi_allowlist, cran_allowlist) - - -def get_allowlist(allowlist_path, is_cran): - """ - Read list of allowed packages from a file - - Args: - allowlist_path: Path to the allowlist file - is_cran: True if the allowlist if for CRAN, False if it is for PyPI - - Returns: - List of the package names specified in the file - """ - allowlist = [] - with open(allowlist_path, "r") as allowlist_file: - # Sanitise package names - # - convert to lower case if the package is on PyPI. Leave alone on CRAN to prevent issues with case-sensitivity - # - convert special characters to '-' - # - remove any blank entries, which act as a wildcard that would allow any package - special_characters = re.compile(r"[^0-9a-zA-Z]+") - for package_name in allowlist_file.readlines(): - if is_cran: - package_name = special_characters.sub("-", package_name.strip()) - else: - package_name = special_characters.sub("-", package_name.lower().strip()) - if package_name: - allowlist.append(package_name) - return allowlist - - -def recreate_repositories(nexus_api): - """ - Create PyPI and CRAN proxy repositories in an idempotent manner - - Args: - nexus_api: NexusAPI object - """ - # Delete all existing repositories - nexus_api.delete_all_repositories() - - for repository in _NEXUS_REPOSITORIES.values(): - nexus_api.create_proxy_repository(**repository) - - -def recreate_privileges(tier, nexus_api, pypi_allowlist=[], - cran_allowlist=[]): - """ - Create content selectors and content selector privileges based on tier and - allowlists in an idempotent manner - - Args: - nexus_api: NexusAPI object - pypi_allowlist: List of allowed PyPI packages - cran_allowlist: List of allowed CRAN packages - - Returns: - List of the names of all content selector privileges - """ - # Delete all existing content selector privileges - # These must be deleted before the content selectors as the content selectors - # as the privileges depend on the content selectors - nexus_api.delete_all_content_selector_privileges() - - # Delete all existing content selectors - nexus_api.delete_all_content_selectors() - - pypi_privilege_names = [] - cran_privilege_names = [] - - # Content selector and privilege for PyPI 'simple' path, used to search for - # packages - privilege_name = create_content_selector_and_privilege( - nexus_api, - name="simple", - description="Allow access to 'simple' directory in PyPI repository", - expression='format == "pypi" and path=^"/simple"', - repo_type=_NEXUS_REPOSITORIES["pypi_proxy"]["repo_type"], - repo=_NEXUS_REPOSITORIES["pypi_proxy"]["name"] - ) - pypi_privilege_names.append(privilege_name) - - # Content selector and privilege for CRAN 'PACKAGES' file which contains an - # index of all packages - privilege_name = create_content_selector_and_privilege( - nexus_api, - name="packages", - description="Allow access to 'PACKAGES' file in CRAN repository", - expression='format == "r" and path=="/src/contrib/PACKAGES"', - repo_type=_NEXUS_REPOSITORIES["cran_proxy"]["repo_type"], - repo=_NEXUS_REPOSITORIES["cran_proxy"]["name"] - ) - cran_privilege_names.append(privilege_name) - - # Create content selectors and privileges for packages according to the tier - if tier == 2: - # Allow all PyPI packages - privilege_name = 
create_content_selector_and_privilege( - nexus_api, - name="pypi-all", - description="Allow access to all PyPI packages", - expression='format == "pypi" and path=^"/packages/"', - repo_type=_NEXUS_REPOSITORIES["pypi_proxy"]["repo_type"], - repo=_NEXUS_REPOSITORIES["pypi_proxy"]["name"] - ) - pypi_privilege_names.append(privilege_name) - - # Allow all CRAN packages - privilege_name = create_content_selector_and_privilege( - nexus_api, - name="cran-all", - description="Allow access to all CRAN packages", - expression='format == "r" and path=^"/src/contrib"', - repo_type=_NEXUS_REPOSITORIES["cran_proxy"]["repo_type"], - repo=_NEXUS_REPOSITORIES["cran_proxy"]["name"] - ) - cran_privilege_names.append(privilege_name) - elif tier == 3: - # Allow selected PyPI packages - for package in pypi_allowlist: - privilege_name = create_content_selector_and_privilege( - nexus_api, - name=f"pypi-{package}", - description=f"Allow access to {package} on PyPI", - expression=f'format == "pypi" and path=^"/packages/{package}/"', - repo_type=_NEXUS_REPOSITORIES["pypi_proxy"]["repo_type"], - repo=_NEXUS_REPOSITORIES["pypi_proxy"]["name"] - ) - pypi_privilege_names.append(privilege_name) - - # Allow selected CRAN packages - for package in cran_allowlist: - privilege_name = create_content_selector_and_privilege( - nexus_api, - name=f"cran-{package}", - description=f"allow access to {package} on CRAN", - expression=f'format == "r" and path=^"/src/contrib/{package}_"', - repo_type=_NEXUS_REPOSITORIES["cran_proxy"]["repo_type"], - repo=_NEXUS_REPOSITORIES["cran_proxy"]["name"] - ) - cran_privilege_names.append(privilege_name) - - return (pypi_privilege_names + cran_privilege_names) - - -def create_content_selector_and_privilege(nexus_api, name, description, - expression, repo_type, repo): - """ - Create a content selector and corresponding content selector privilege - - Args: - nexus_api: NexusAPI object - name: Name shared by the content selector and content selector - privilege - description: Description shared by the content selector and content - selector privilege - expression: CSEL expression defining the content selector - repo_type: Type of repository the content selector privilege applies to - repo: Name of the repository the content selector privilege applies to - """ - nexus_api.create_content_selector( - name=name, - description=description, - expression=expression - ) - - nexus_api.create_content_selector_privilege( - name=name, - description=description, - repo_type=repo_type, - repo=repo, - content_selector=name, - ) - - return name - - -if __name__ == "__main__": - main() diff --git a/deployment/safe_haven_management_environment/cloud_init/resources/nexus_docker_compose.mustache.yaml b/deployment/safe_haven_management_environment/cloud_init/resources/nexus_docker_compose.mustache.yaml deleted file mode 100644 index ddb046d615..0000000000 --- a/deployment/safe_haven_management_environment/cloud_init/resources/nexus_docker_compose.mustache.yaml +++ /dev/null @@ -1,25 +0,0 @@ ---- -version: "3" -services: - nexus: - container_name: nexus_nexus_compose - image: sonatype/nexus3:3.65.0 - expose: - - 8081 - volumes: - - /etc/localtime:/etc/localtime:ro - - /etc/timezone:/etc/timezone:ro - - /nexus-data:/nexus-data - restart: always - nginx: - container_name: nginx_nexus_compose - image: nginx:1.25 - ports: - - 80:80 - volumes: - - /etc/localtime:/etc/localtime:ro - - /etc/timezone:/etc/timezone:ro - - /etc/nexus/nginx.conf:/etc/nginx/conf.d/default.conf:ro - links: - - nexus - restart: always diff --git 
a/deployment/safe_haven_management_environment/cloud_init/resources/nexus_nginx.mustache.conf b/deployment/safe_haven_management_environment/cloud_init/resources/nexus_nginx.mustache.conf deleted file mode 100644 index 8fc9f7c8e7..0000000000 --- a/deployment/safe_haven_management_environment/cloud_init/resources/nexus_nginx.mustache.conf +++ /dev/null @@ -1,11 +0,0 @@ -server { - listen 80; - listen [::]:80; - server_name nexus-tier{{nexus.tier}}.{{domain.fqdn}}; - - location / { - proxy_pass http://nexus:8081/; - proxy_intercept_errors on; - error_page 401 =403 /403.html; - } -} \ No newline at end of file diff --git a/deployment/safe_haven_management_environment/cloud_init/resources/update_known_internal_mirrors.sh b/deployment/safe_haven_management_environment/cloud_init/resources/update_known_internal_mirrors.sh deleted file mode 100644 index ac84a075b1..0000000000 --- a/deployment/safe_haven_management_environment/cloud_init/resources/update_known_internal_mirrors.sh +++ /dev/null @@ -1,18 +0,0 @@ -#! /bin/bash - -echo "Update known hosts on the external server to allow connections to the internal server..." -mkdir -p /home/mirrordaemon/.ssh -echo "$1" >> /home/mirrordaemon/.ssh/known_hosts -ssh-keygen -H -f /home/mirrordaemon/.ssh/known_hosts 2>&1 -chown mirrordaemon:mirrordaemon /home/mirrordaemon/.ssh/known_hosts -rm /home/mirrordaemon/.ssh/known_hosts.old 2> /dev/null -echo "This server currently possesses fingerprints for the following internal mirrors..." -cat /home/mirrordaemon/.ssh/known_hosts - -echo "Update known IP addresses on the external server to schedule pushing to the internal server..." -echo "$2" >> /home/mirrordaemon/internal_mirror_ip_addresses.txt -cp /home/mirrordaemon/internal_mirror_ip_addresses.txt /home/mirrordaemon/internal_mirror_ip_addresses.bak -sort /home/mirrordaemon/internal_mirror_ip_addresses.bak | uniq > /home/mirrordaemon/internal_mirror_ip_addresses.txt -rm -f /home/mirrordaemon/internal_mirror_ip_addresses.bak -echo "This server is currently aware of internal mirrors at the following locations..."
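The script above registers a new internal mirror idempotently: it appends the mirror's host key and IP address, then re-sorts and de-duplicates the IP list so repeated registrations are harmless. A minimal Python sketch of that append-and-deduplicate step, for reference only (the registry path comes from the script; the function name is mine):

```python
from pathlib import Path


def register_internal_mirror(
    ip_address: str,
    registry: Path = Path("/home/mirrordaemon/internal_mirror_ip_addresses.txt"),
) -> list[str]:
    """Append a mirror IP to the registry file, then rewrite it sorted and de-duplicated."""
    known = set(registry.read_text().split()) if registry.exists() else set()
    known.add(ip_address.strip())
    ordered = sorted(known)
    registry.write_text("\n".join(ordered) + "\n")
    return ordered
```

Calling it twice with the same address leaves the registry unchanged, which is exactly the behaviour the sort | uniq round-trip provides.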
-cat /home/mirrordaemon/internal_mirror_ip_addresses.txt diff --git a/deployment/safe_haven_management_environment/desired_state_configuration/DC1Bootstrap.ps1 b/deployment/safe_haven_management_environment/desired_state_configuration/DC1Bootstrap.ps1 deleted file mode 100644 index 9d45116e88..0000000000 --- a/deployment/safe_haven_management_environment/desired_state_configuration/DC1Bootstrap.ps1 +++ /dev/null @@ -1,11 +0,0 @@ -# Here we install -# - NuGet (for module management) -# - PowerShellModule (to allow modules to be installed in DSC) -# - various x* modules (to enable DSC functions) -# Other Powershell modules should be installed through DSC -Install-PackageProvider -Name NuGet -MinimumVersion 2.8.5.201 -Force; -Install-Module PowerShellModule -MinimumVersion 0.3 -Force; -Install-Module ActiveDirectoryDsc -MinimumVersion 6.2.0 -Force; -Install-Module ComputerManagementDsc -MinimumVersion 8.5.0 -Force; -Install-Module DnsServerDsc -MinimumVersion 3.0.0 -Force; -Install-Module NetworkingDsc -MinimumVersion 9.0.0 -Force; diff --git a/deployment/safe_haven_management_environment/desired_state_configuration/DC1DesiredState.ps1 b/deployment/safe_haven_management_environment/desired_state_configuration/DC1DesiredState.ps1 deleted file mode 100755 index 8c21de41be..0000000000 --- a/deployment/safe_haven_management_environment/desired_state_configuration/DC1DesiredState.ps1 +++ /dev/null @@ -1,807 +0,0 @@ -configuration InstallPowershellModules { - Import-DscResource -ModuleName PowerShellModule - - Node localhost { - PSModuleResource MSOnline { - Ensure = "present" - Module_Name = "MSOnline" - } - - PSModuleResource PackageManagement { - Ensure = "present" - Module_Name = "PackageManagement" - } - - PSModuleResource PowerShellGet { - Ensure = "present" - Module_Name = "PowerShellGet" - } - - PSModuleResource PSWindowsUpdate { - Ensure = "present" - Module_Name = "PSWindowsUpdate" - } - } -} - - -configuration CreatePrimaryDomainController { - param ( - [Parameter(HelpMessage = "Path to Active Directory log volume")] - [ValidateNotNullOrEmpty()] - [string]$ActiveDirectoryLogPath, - - [Parameter(HelpMessage = "Path to Active Directory NTDS volume")] - [ValidateNotNullOrEmpty()] - [string]$ActiveDirectoryNtdsPath, - - [Parameter(HelpMessage = "Path to Active Directory system volume")] - [ValidateNotNullOrEmpty()] - [string]$ActiveDirectorySysvolPath, - - [Parameter(Mandatory = $true, HelpMessage = "Domain administrator credentials")] - [ValidateNotNullOrEmpty()] - [System.Management.Automation.PSCredential]$DomainAdministratorCredentials, - - [Parameter(Mandatory = $true, HelpMessage = "FQDN for the SHM domain")] - [ValidateNotNullOrEmpty()] - [String]$DomainFqdn, - - [Parameter(Mandatory = $true, HelpMessage = "NetBIOS name for the domain")] - [ValidateNotNullOrEmpty()] - [String]$DomainNetBIOSName, - - [Parameter(Mandatory = $true, HelpMessage = "VM administrator safe mode credentials")] - [ValidateNotNullOrEmpty()] - [System.Management.Automation.PSCredential]$SafeModeCredentials - ) - - Import-DscResource -ModuleName PSDesiredStateConfiguration - Import-DscResource -ModuleName ActiveDirectoryDsc - Import-DscResource -ModuleName ComputerManagementDsc - Import-DscResource -ModuleName NetworkingDsc - - # Construct variables for use in DSC modules - $Interface = Get-NetAdapter | Where-Object Name -Like "Ethernet*" | Select-Object -First 1 - - Node localhost { - LocalConfigurationManager { - ActionAfterReboot = "ContinueConfiguration" - ConfigurationMode = "ApplyOnly" - RebootNodeIfNeeded = 
$true - } - - WindowsFeature DNS { - Ensure = "Present" - Name = "DNS" - } - - WindowsFeature DnsServer { - Ensure = "Present" - Name = "RSAT-DNS-Server" - DependsOn = "[WindowsFeature]DNS" - } - - WindowsFeature ADDomainServices { - Ensure = "Present" - Name = "AD-Domain-Services" - } - - WindowsFeature ADDSTools { - Ensure = "Present" - Name = "RSAT-ADDS-Tools" - } - - WindowsFeature ADAdminCenter { - Ensure = "Present" - Name = "RSAT-AD-AdminCenter" - } - - Script EnableDNSDiags { - SetScript = { - Write-Verbose -Verbose "Enabling DNS client diagnostics" - Set-DnsServerDiagnostics -All $true - } - GetScript = { @{} } - TestScript = { $false } - DependsOn = "[WindowsFeature]DnsServer" - } - - DnsServerAddress DnsServerAddress { # from NetworkingDsc - Address = "127.0.0.1" - AddressFamily = "IPv4" - InterfaceAlias = $Interface.Name - DependsOn = "[WindowsFeature]DnsServer" - } - - ADDomain PrimaryDomainController { # from ActiveDirectoryDsc - Credential = $DomainAdministratorCredentials - DatabasePath = $ActiveDirectoryNtdsPath - DomainName = $DomainFqdn - DomainNetBiosName = $DomainNetBIOSName - LogPath = $ActiveDirectoryLogPath - SafeModeAdministratorPassword = $SafeModeCredentials - SysvolPath = $ActiveDirectorySysvolPath - DependsOn = @("[DnsServerAddress]DnsServerAddress", "[WindowsFeature]ADDomainServices", "[WindowsFeature]ADDSTools") - } - - PendingReboot RebootAfterPromotion { # from ComputerManagementDsc - Name = "RebootAfterPromotion" - DependsOn = "[ADDomain]PrimaryDomainController" - } - } -} - - -configuration UploadArtifacts { - param ( - [Parameter(HelpMessage = "Absolute path to directory which blobs should be downloaded to")] - [ValidateNotNullOrEmpty()] - [string]$ArtifactsDirectory, - - [Parameter(HelpMessage = "Array of blob names to download from storage blob container")] - [ValidateNotNullOrEmpty()] - [PSCustomObject]$BlobNames, - - [Parameter(HelpMessage = "SAS token with read/list rights to the storage blob container")] - [ValidateNotNullOrEmpty()] - [string]$BlobSasToken, - - [Parameter(HelpMessage = "Name of the storage account")] - [ValidateNotNullOrEmpty()] - [string]$StorageAccountName, - - [Parameter(HelpMessage = "Name of the storage container")] - [ValidateNotNullOrEmpty()] - [string]$StorageContainerName - ) - - Node localhost { - Script EmptyDirectory { - SetScript = { - try { - Write-Verbose -Verbose "Clearing all pre-existing files and folders from '$using:ArtifactsDirectory'" - if (Test-Path -Path $using:ArtifactsDirectory) { - Get-ChildItem $using:ArtifactsDirectory -Recurse | Remove-Item -Recurse -Force - } else { - New-Item -ItemType directory -Path $using:ArtifactsDirectory - } - } catch { - Write-Error "EmptyDirectory: $($_.Exception)" - } - } - GetScript = { @{} } - TestScript = { (Test-Path -Path $using:ArtifactsDirectory) -and -not (Test-Path -Path "$using:ArtifactsDirectory/*") } - } - - Script DownloadArtifacts { - SetScript = { - try { - Write-Verbose -Verbose "Downloading $($using:BlobNames.Length) files to '$using:ArtifactsDirectory'..." 
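The DownloadArtifacts script here builds each blob URL from the storage account name, container, blob path and SAS token, then fetches it with Invoke-WebRequest while recreating the blob's folder structure on disk (the loop continues below). The same fetch logic, sketched in Python with requests purely as illustration; the function name is invented and it assumes the SAS token already carries its leading '?':

```python
from pathlib import Path

import requests


def download_blob(account: str, container: str, blob_name: str, sas_token: str, target_dir: Path) -> Path:
    """Fetch one blob over HTTPS with a read/list SAS token, mirroring the blob's folder structure locally."""
    # Assumed: sas_token already starts with '?', as in the PowerShell resource above
    url = f"https://{account}.blob.core.windows.net/{container}/{blob_name}{sas_token}"
    local_path = target_dir / blob_name
    local_path.parent.mkdir(parents=True, exist_ok=True)
    response = requests.get(url, timeout=60)
    response.raise_for_status()  # stands in for the 'throw "Downloading ... failed!"' branch
    local_path.write_bytes(response.content)
    return local_path
```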
- foreach ($BlobName in $using:BlobNames) { - # Ensure that local directory exists - $LocalDir = Join-Path $using:ArtifactsDirectory $(Split-Path -Parent $BlobName) - if (-not (Test-Path -Path $LocalDir)) { - $null = New-Item -ItemType Directory -Path $LocalDir - } - $LocalFilePath = Join-Path $LocalDir (Split-Path -Leaf $BlobName) - - # Download file from blob storage - $BlobUrl = "https://$($using:StorageAccountName).blob.core.windows.net/$($using:StorageContainerName)/${BlobName}$($using:BlobSasToken)" - Write-Verbose -Verbose " [ ] Fetching $BlobUrl..." - $null = Invoke-WebRequest -Uri $BlobUrl -OutFile $LocalFilePath - if ($?) { - Write-Verbose -Verbose "Downloading $BlobUrl succeeded" - } else { - throw "Downloading $BlobUrl failed!" - } - } - } catch { - Write-Error "DownloadArtifacts: $($_.Exception)" - } - } - GetScript = { @{} } - TestScript = { $false } - DependsOn = "[Script]EmptyDirectory" - } - } -} - - -configuration ConfigureActiveDirectory { - param ( - [Parameter(Mandatory = $true, HelpMessage = "Domain administrator credentials")] - [ValidateNotNullOrEmpty()] - [System.Management.Automation.PSCredential]$DomainAdministratorCredentials, - - [Parameter(HelpMessage = "Username for a user with domain admin privileges")] - [ValidateNotNullOrEmpty()] - [string]$DomainAdminUsername, - - [Parameter(HelpMessage = "Fully-qualified SHM domain name")] - [ValidateNotNullOrEmpty()] - [string]$DomainFqdn, - - [Parameter(HelpMessage = "Domain OU (eg. DC=TURINGSAFEHAVEN,DC=AC,DC=UK)")] - [ValidateNotNullOrEmpty()] - [string]$DomainDn, - - [Parameter(Mandatory = $true, HelpMessage = "NetBIOS name for the domain")] - [ValidateNotNullOrEmpty()] - [String]$DomainNetBIOSName, - - [Parameter(HelpMessage = "Array of OU names to create")] - [ValidateNotNullOrEmpty()] - [string[]]$OuNames, - - [Parameter(HelpMessage = "Array of security group names to create")] - [ValidateNotNullOrEmpty()] - [hashtable[]]$SecurityGroups, - - [Parameter(HelpMessage = "DN for service accounts OU")] - [ValidateNotNullOrEmpty()] - [string]$ServiceAccountsOuDn, - - [Parameter(HelpMessage = "User accounts to create")] - [ValidateNotNullOrEmpty()] - [hashtable[]]$UserAccounts - ) - - Import-DscResource -Module ActiveDirectoryDsc - - $localAdSyncUser = $UserAccounts | Where-Object { $_.key -eq "aadLocalSync" } - $computerManagersSG = $SecurityGroups | Where-Object { $_.key -eq "computerManagers" } - $serverAdminsSG = $SecurityGroups | Where-Object { $_.key -eq "serverAdmins" } - $guidMap = @{ - "lockoutTime" = "28630ebf-41d5-11d1-a9c1-0000f80367c1"; - "mS-DS-ConsistencyGuid" = "23773dc2-b63a-11d2-90e1-00c04fd91ab1"; - "msDS-KeyCredentialLink" = "5b47d60f-6090-40b2-9f37-2a4de88f3063"; - "pwdLastSet" = "bf967a0a-0de6-11d0-a285-00aa003049e2"; - "user" = "bf967aba-0de6-11d0-a285-00aa003049e2"; - } - $extendedrightsmap = @{ - "Change Password" = "ab721a53-1e2f-11d0-9819-00aa0040529b"; - "Reset Password" = "00299570-246d-11d0-a768-00aa006e0529"; - } - - Node localhost { - # Create organisational units - foreach ($ouName in $OuNames) { - ADOrganizationalUnit $ouName { # from ActiveDirectoryDsc - Credential = $DomainAdministratorCredentials - Description = $ouName - Ensure = "Present" - Name = $ouName - Path = $DomainDn - ProtectedFromAccidentalDeletion = $true - } - } - - # Create service users - foreach ($userAccount in $UserAccounts) { - ADUser "$($userAccount.name)" { - Description = $userAccount.name - DisplayName = $userAccount.name - DomainName = $DomainFqdn - Ensure = "Present" - Password = $userAccount.credentials - 
PasswordNeverExpires = $true - Path = $ServiceAccountsOuDn - UserName = $userAccount.credentials.UserName - } - } - - # Create security groups - foreach ($securityGroup in $SecurityGroups) { - $Members = @() - # Add domain admin to server administrators group - if ($securityGroup.name -eq $serverAdminsSG.name) { - $Members = @($DomainAdminUsername) - # Add service users to computer managers group (except the localAdSync user) - } elseif ($computerManagersSG.name -eq $serverAdminsSG.name) { - $Members = $UserAccounts | Where-Object { $_.key -ne $localAdSyncUser.key } | ForEach-Object { $_.credentials.UserName } - } - ADGroup "$($securityGroup.name)" { # from ActiveDirectoryDsc - Category = "Security" - Description = $securityGroup.name - Ensure = "Present" - GroupName = $securityGroup.name - GroupScope = "Global" - Members = $Members - Path = $SecurityGroup.dn - } - } - - # Enable the AD recycle bin - ADOptionalFeature RecycleBin { # from ActiveDirectoryDsc - EnterpriseAdministratorCredential = $DomainAdministratorCredentials - FeatureName = "Recycle Bin Feature" - ForestFQDN = $DomainFqdn - } - - # Set domain admin password to never expire - ADUser SetAdminPasswordExpiry { - UserName = $DomainAdminUsername - DomainName = $DomainFqdn - PasswordNeverExpires = $true - } - - # Disable minimum password age - ADDomainDefaultPasswordPolicy DisableMinimumPasswordAge { - Credential = $DomainAdministratorCredentials - DomainName = $DomainFqdn - MinPasswordAge = 0 - } - - # Give write permissions to the local AD sync account - foreach ($property in @("lockoutTime", "pwdLastSet", "mS-DS-ConsistencyGuid", "msDS-KeyCredentialLink")) { - ADObjectPermissionEntry "$property" { - AccessControlType = "Allow" - ActiveDirectoryRights = "WriteProperty" - ActiveDirectorySecurityInheritance = "Descendents" - Ensure = "Present" - IdentityReference = "${DomainNetBIOSName}\$($localAdSyncUser.credentials.UserName)" - InheritedObjectType = $guidMap["user"] - ObjectType = $guidmap[$property] - Path = $DomainDn - } - } - - # Give extended rights to the local AD sync account - foreach ($extendedRight in @("Change Password", "Reset Password")) { - ADObjectPermissionEntry "$extendedRight" { - AccessControlType = "Allow" - ActiveDirectoryRights = "ExtendedRight" - ActiveDirectorySecurityInheritance = "Descendents" - Ensure = "Present" - IdentityReference = "${DomainNetBIOSName}\$($localAdSyncUser.credentials.UserName)" - InheritedObjectType = $guidMap["user"] - ObjectType = $extendedrightsmap[$extendedRight] - Path = $DomainDn - } - } - - # Allow the local AD sync account to replicate directory changes - Script SetLocalAdSyncPermissions { - SetScript = { - try { - $success = $true - $rootDse = Get-ADRootDSE - $aadLocalSyncSID = (Get-ADUser $using:localAdSyncUser.credentials.UserName).SID - $null = dsacls "$($rootDse.DefaultNamingContext)" /G "${aadLocalSyncSID}:CA;Replicating Directory Changes" - $success = $success -and $? - $null = dsacls "$($rootDse.ConfigurationNamingContext)" /G "${aadLocalSyncSID}:CA;Replicating Directory Changes" - $success = $success -and $? - $null = dsacls "$($rootDse.DefaultNamingContext)" /G "${aadLocalSyncSID}:CA;Replicating Directory Changes All" - $success = $success -and $? - $null = dsacls "$($rootDse.ConfigurationNamingContext)" /G "${aadLocalSyncSID}:CA;Replicating Directory Changes All" - $success = $success -and $? 
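The dsacls calls above grant the AD sync service account "Replicating Directory Changes" and "Replicating Directory Changes All" on both the default and configuration naming contexts, folding each exit code into a single $success flag. A rough sketch of the same grant loop in Python with subprocess, offered as illustration only (the function name and argument shapes are assumptions, not part of the original script):

```python
import subprocess


def grant_replication_rights(naming_contexts: list[str], account_sid: str) -> None:
    """Grant 'Replicating Directory Changes' (and 'All') to the sync account on each naming context."""
    for context in naming_contexts:
        for right in ("Replicating Directory Changes", "Replicating Directory Changes All"):
            # check=True raises CalledProcessError on a non-zero exit code,
            # replacing the manual '$success = $success -and $?' bookkeeping
            subprocess.run(
                ["dsacls", context, "/G", f"{account_sid}:CA;{right}"],
                check=True,
                capture_output=True,
            )
```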
- if ($success) { - Write-Verbose -Verbose "Successfully updated ACL permissions for AD Sync Service account '$($using:localAdSyncUsercredentials.UserName)'" - } else { - throw "Failed to update ACL permissions for AD Sync Service account '$($using:localAdSyncUsercredentials.UserName)'!" - } - } catch { - Write-Error "SetLocalAdSyncPermissions: $($_.Exception)" - } - } - GetScript = { @{} } - TestScript = { $false } - DependsOn = "[ADUser]$($localAdSyncUser.name)" - } - - # Delegate Active Directory permissions to users/groups that allow them to register computers in the domain - Script SetComputerRegistrationPermissions { - SetScript = { - try { - foreach ($userAccount in $using:UserAccounts) { - $success = $true - if (-not $userAccount.groupOu) { continue } - $organisationalUnit = Get-ADObject -Filter "distinguishedName -eq '$($userAccount.groupOu)'" - $userPrincipalName = "$($using:DomainNetBiosName)\$($userAccount.credentials.UserName)" - # Add permission to create child computer objects - $null = dsacls $organisationalUnit /I:T /G "${userPrincipalName}:CC;computer" - $success = $success -and $? - # Give 'write property' permissions over several attributes of child computer objects - $null = dsacls $organisationalUnit /I:S /G "${userPrincipalName}:WP;DNS Host Name Attributes;computer" - $success = $success -and $? - $null = dsacls $organisationalUnit /I:S /G "${userPrincipalName}:WP;msDS-SupportedEncryptionTypes;computer" - $success = $success -and $? - $null = dsacls $organisationalUnit /I:S /G "${userPrincipalName}:WP;operatingSystem;computer" - $success = $success -and $? - $null = dsacls $organisationalUnit /I:S /G "${userPrincipalName}:WP;operatingSystemVersion;computer" - $success = $success -and $? - $null = dsacls $organisationalUnit /I:S /G "${userPrincipalName}:WP;operatingSystemServicePack;computer" - $success = $success -and $? - $null = dsacls $organisationalUnit /I:S /G "${userPrincipalName}:WP;sAMAccountName;computer" - $success = $success -and $? - $null = dsacls $organisationalUnit /I:S /G "${userPrincipalName}:WP;servicePrincipalName;computer" - $success = $success -and $? - $null = dsacls $organisationalUnit /I:S /G "${userPrincipalName}:WP;userPrincipalName;computer" - $success = $success -and $? - } - if ($success) { - Write-Verbose -Verbose "Successfully delegated Active Directory permissions on '$($userAccount.groupOu)' to '$userPrincipalName'" - } else { - throw "Failed to delegate Active Directory permissions on '$($userAccount.groupOu)' to '$userPrincipalName'!" - } - } catch { - Write-Error "SetComputerRegistrationPermissions: $($_.Exception)" - } - } - GetScript = { @{} } - TestScript = { $false } - DependsOn = "[Script]SetLocalAdSyncPermissions" - } - } -} - - -configuration ApplyGroupPolicies { - param ( - [Parameter(HelpMessage = "Path to Active Directory system volume")] - [ValidateNotNullOrEmpty()] - [string]$ActiveDirectorySysvolPath, - - [Parameter(HelpMessage = "Absolute path to directory which blobs should be downloaded to")] - [ValidateNotNullOrEmpty()] - [string]$ArtifactsDirectory, - - [Parameter(HelpMessage = "Fully-qualified SHM domain name")] - [ValidateNotNullOrEmpty()] - [string]$DomainFqdn, - - [Parameter(HelpMessage = "Domain OU (eg. DC=TURINGSAFEHAVEN,DC=AC,DC=UK)")] - [ValidateNotNullOrEmpty()] - [string]$DomainDn, - - [Parameter(HelpMessage = "Database servers OU name (eg. 
'Secure Research Environment Database Servers')")] - [ValidateNotNullOrEmpty()] - [string]$OuNameDatabaseServers, - - [Parameter(HelpMessage = "Identity servers OU name (eg. 'Safe Haven Identity Servers')")] - [ValidateNotNullOrEmpty()] - [string]$OuNameIdentityServers, - - [Parameter(HelpMessage = "Linux servers OU name (eg. 'Secure Research Environment Linux Servers')")] - [ValidateNotNullOrEmpty()] - [string]$OuNameLinuxServers, - - [Parameter(HelpMessage = "Research users OU name (eg. 'Safe Haven Research Users')")] - [ValidateNotNullOrEmpty()] - [string]$OuNameResearchUsers, - - [Parameter(HelpMessage = "Security groups OU name (eg. 'Safe Haven Security Groups')")] - [ValidateNotNullOrEmpty()] - [string]$OuNameSecurityGroups, - - [Parameter(HelpMessage = "Service accounts OU name (eg. 'Safe Haven Service Accounts')")] - [ValidateNotNullOrEmpty()] - [string]$OuNameServiceAccounts, - - [Parameter(HelpMessage = "Name of the server administrator group")] - [ValidateNotNullOrEmpty()] - [string]$ServerAdminSgName - ) - - # Construct variables for use in DSC modules - $GpoOutputPath = Join-Path $ArtifactsDirectory "GPOs" - - Node localhost { - Script ExtractGroupPolicies { - SetScript = { - try { - Write-Verbose -Verbose "Extracting GPO zip files..." - Expand-Archive "$($using:ArtifactsDirectory)\GPOs.zip" -DestinationPath $using:ArtifactsDirectory -Force - if ($?) { - Write-Verbose -Verbose "Successfully extracted GPO zip files" - } else { - throw "Failed to extract GPO zip files" - } - } catch { - Write-Error "ExtractGroupPolicies: $($_.Exception)" - } - } - GetScript = { @{} } - TestScript = { Test-Path -Path "$($using:GpoOutputPath)/*" } - } - - Script ImportGroupPolicies { - SetScript = { - try { - Write-Verbose -Verbose "Importing GPOs..." - foreach ($sourceTargetPair in (("0AF343A0-248D-4CA5-B19E-5FA46DAE9F9C", "All servers - Local Administrators"), - ("EE9EF278-1F3F-461C-9F7A-97F2B82C04B4", "All Servers - Windows Update"), - ("742211F9-1482-4D06-A8DE-BA66101933EB", "All Servers - Windows Services"))) { - $source, $target = $sourceTargetPair - $null = Import-GPO -BackupId "$source" -TargetName "$target" -Path $using:GpoOutputPath -CreateIfNeeded - if ($?) { - Write-Verbose -Verbose "Importing '$source' to '$target' succeeded" - } else { - throw "Importing '$source' to '$target' failed!" - } - } - } catch { - Write-Error "ImportGroupPolicies: $($_.Exception)" - } - } - GetScript = { @{} } - TestScript = { $false } - DependsOn = "[Script]ExtractGroupPolicies" - } - - Script LinkGroupPoliciesToOus { - SetScript = { - try { - Write-Verbose -Verbose "Linking GPOs to OUs..." 
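The LinkGroupPoliciesToOus script that follows walks a table of (GPO name, OU name) pairs, inspects the GPO report XML for an existing link, and only calls New-GPLink when no link is found. That idempotent check-then-create pattern, reduced to a minimal Python sketch (the helper names are placeholders, not a real GPO API):

```python
def ensure_gpo_links(desired, existing_links, create_link):
    """Create each (gpo_name, ou_name) link that is not already present; return the pairs created."""
    created = []
    for gpo_name, ou_name in desired:
        if (gpo_name, ou_name) in existing_links:
            continue  # link already present, leave it alone
        create_link(gpo_name, ou_name)
        created.append((gpo_name, ou_name))
    return created
```

Running it twice with the same inputs creates nothing on the second pass, which is the property the TestScript/SetScript pairing in these resources approximates.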
- foreach ($gpoOuNamePair in (("All servers - Local Administrators", "$using:OuNameDatabaseServers"), - ("All servers - Local Administrators", "$using:OuNameIdentityServers"), - ("All Servers - Windows Services", "Domain Controllers"), - ("All Servers - Windows Services", "$using:OuNameDatabaseServers"), - ("All Servers - Windows Services", "$using:OuNameIdentityServers"), - ("All Servers - Windows Update", "Domain Controllers"), - ("All Servers - Windows Update", "$using:OuNameDatabaseServers"), - ("All Servers - Windows Update", "$using:OuNameIdentityServers"))) { - $gpoName, $ouName = $gpoOuNamePair - $gpo = Get-GPO -Name "$gpoName" - # Check for a match in existing GPOs - [xml]$gpoReportXML = Get-GPOReport -Guid $gpo.Id -ReportType xml - $hasGPLink = (@($gpoReportXML.GPO.LinksTo | Where-Object { ($_.SOMName -like "*${ouName}*") -and ($_.SOMPath -eq "$($using:DomainFqdn)/${ouName}") }).Count -gt 0) - # Create a GP link if it doesn't already exist - if ($hasGPLink) { - Write-Verbose -Verbose "GPO '$gpoName' already linked to '$ouName'" - } else { - $null = New-GPLink -Guid $gpo.Id -Target "OU=${ouName},$($using:DomainDn)" -LinkEnabled Yes - if ($?) { - Write-Verbose -Verbose "Linking GPO '$gpoName' to '$ouName' succeeded" - } else { - throw "Linking GPO '$gpoName' to '$ouName' failed!" - } - } - } - } catch { - Write-Error "LinkGroupPoliciesToOus: $($_.Exception)" - } - } - GetScript = { @{} } - TestScript = { $false } - DependsOn = "[Script]ImportGroupPolicies" - } - - Script GiveDomainAdminsLocalPrivileges { - SetScript = { - try { - # Get SID for the Local Administrators group - $localAdminGroupName = "All servers - Local Administrators" - $localAdminGpo = Get-GPO -Name $localAdminGroupName - [xml]$gpoReportXML = Get-GPOReport -Guid $localAdminGpo.ID -ReportType xml - foreach ($group in $gpoReportXML.GPO.Computer.ExtensionData.Extension.RestrictedGroups) { - if ($group.GroupName.Name.'#text' -eq "BUILTIN\Administrators") { - $localAdminGroupSID = $group.GroupName.SID.'#text' - } - } - if ($localAdminGroupSID) { - Write-Verbose -Verbose "Local admin group '$localAdminGroupName' group has ID $localAdminGroupSID" - } else { - throw "ID for local admin group '$localAdminGroupName' could not be found!" - } - - # Edit GptTmpl file controlling which domain users should be considered local administrators - Write-Verbose -Verbose "Ensuring that members of '$using:serverAdminSgName' are local administrators" - $GptTmplString = @( - '[Unicode]', - 'Unicode=yes', - '[Version]', - 'signature="$CHICAGO$"', - 'Revision=1', - '[Group Membership]', - "*${localAdminGroupSID}__Memberof =", - "*${localAdminGroupSID}__Members = $using:serverAdminSgName" - ) -join "`n" - Set-Content -Path "$($using:ActiveDirectorySysvolPath)\domain\Policies\{$($localAdminGpo.ID)}\Machine\Microsoft\Windows NT\SecEdit\GptTmpl.inf" -Value "$GptTmplString" - if ($?) 
{ - Write-Verbose -Verbose "Successfully set group policies for 'Local Administrators'" - } else { - throw "Failed to set group policies for 'Local Administrators'" - } - } catch { - Write-Error "GiveDomainAdminsLocalPrivileges: $($_.Exception)" - } - } - GetScript = { @{} } - TestScript = { $false } - DependsOn = "[Script]ImportGroupPolicies" - } - } -} - - -configuration ConfigureDns { - param ( - [Parameter(Mandatory = $true, HelpMessage = "IP address for the external (Azure) DNS resolver")] - [ValidateNotNullOrEmpty()] - [string]$ExternalDnsResolver, - - [Parameter(Mandatory = $true, HelpMessage = "IP addresses for the domain controllers")] - [ValidateNotNullOrEmpty()] - [string]$IdentitySubnetCidr - ) - - Import-DscResource -ModuleName DnsServerDsc - - # # Construct variables for passing to DSC configurations - $IpOctets = $IdentitySubnetCidr.Split(".") - $ZoneName = "$($IpOctets[2]).$($IpOctets[1]).$($IpOctets[0]).in-addr.arpa" - - Node localhost { - # Use Microsoft Azure DNS server for resolving external addresses - DnsServerForwarder DnsServerForwarder { - IPAddresses = @($ExternalDnsResolver) - IsSingleInstance = "Yes" - } - - # Ensure that reverse lookup zone exists - Script ReverseLookupZone { - SetScript = { - try { - Write-Verbose -Verbose "Creating reverse-lookup zone for '$using:IdentitySubnetCidr'..." - Add-DnsServerPrimaryZone -NetworkID $using:IdentitySubnetCidr -ReplicationScope "Forest" - if ($?) { - Write-Verbose -Verbose "Successfully created reverse-lookup zone for '$using:IdentitySubnetCidr'" - } else { - throw "Failed to create reverse-lookup zone for '$using:IdentitySubnetCidr'!" - } - } catch { - Write-Error "ReverseLookupZone: $($_.Exception)" - } - } - GetScript = { @{} } - TestScript = { - if (Get-DnsServerZone -Name $using:ZoneName -ErrorAction SilentlyContinue | Where-Object { $_.IsReverseLookupZone }) { - Write-Verbose -Verbose "Reverse-lookup zone for '$using:IdentitySubnetCidr' already exists" - return $true - } - return $false - } - DependsOn = "[DnsServerForwarder]DnsServerForwarder" - } - } -} - - -configuration ConfigurePrimaryDomainController { - param ( - [Parameter(Mandatory = $true, HelpMessage = "Active Directory base path")] - [ValidateNotNullOrEmpty()] - [string]$ActiveDirectoryBasePath, - - [Parameter(Mandatory = $true, HelpMessage = "VM administrator credentials")] - [ValidateNotNullOrEmpty()] - [System.Management.Automation.PSCredential]$AdministratorCredentials, - - [Parameter(Mandatory = $true, HelpMessage = "Base-64 encoded array of blob names to download from storage blob container")] - [ValidateNotNullOrEmpty()] - [string]$ArtifactsBlobNamesB64, - - [Parameter(Mandatory = $true, HelpMessage = "Base-64 encoded SAS token with read/list rights to the storage blob container")] - [ValidateNotNullOrEmpty()] - [string]$ArtifactsBlobSasTokenB64, - - [Parameter(Mandatory = $true, HelpMessage = "Name of the artifacts storage account")] - [ValidateNotNullOrEmpty()] - [string]$ArtifactsStorageAccountName, - - [Parameter(Mandatory = $true, HelpMessage = "Name of the artifacts storage container")] - [ValidateNotNullOrEmpty()] - [string]$ArtifactsStorageContainerName, - - [Parameter(Mandatory = $true, HelpMessage = "Absolute path to directory which blobs should be downloaded to")] - [ValidateNotNullOrEmpty()] - [string]$ArtifactsTargetDirectory, - - [Parameter(Mandatory = $true, HelpMessage = "Domain OU (eg. 
DC=TURINGSAFEHAVEN,DC=AC,DC=UK)")] - [ValidateNotNullOrEmpty()] - [string]$DomainDn, - - [Parameter(Mandatory = $true, HelpMessage = "FQDN for the SHM domain")] - [ValidateNotNullOrEmpty()] - [String]$DomainFqdn, - - [Parameter(Mandatory = $true, HelpMessage = "NetBIOS name for the domain")] - [ValidateNotNullOrEmpty()] - [String]$DomainNetBIOSName, - - [Parameter(Mandatory = $true, HelpMessage = "Base-64 encoded domain organisational units")] - [ValidateNotNullOrEmpty()] - [string]$DomainOusB64, - - [Parameter(Mandatory = $true, HelpMessage = "Base64-encoded security groups")] - [ValidateNotNullOrEmpty()] - [string]$DomainSecurityGroupsB64, - - [Parameter(Mandatory = $true, HelpMessage = "IP address for the external (Azure) DNS resolver")] - [ValidateNotNullOrEmpty()] - [string]$ExternalDnsResolver, - - [Parameter(Mandatory = $true, HelpMessage = "IP addresses for the domain controllers")] - [ValidateNotNullOrEmpty()] - [string]$IdentitySubnetCidr, - - [Parameter(Mandatory = $true, HelpMessage = "VM administrator safe mode credentials")] - [ValidateNotNullOrEmpty()] - [System.Management.Automation.PSCredential]$SafeModeCredentials, - - [Parameter(Mandatory = $true, HelpMessage = "Base-64 encoded user accounts")] - [ValidateNotNullOrEmpty()] - [string]$UserAccountsB64 - ) - - # Construct variables for passing to DSC configurations - $activeDirectoryLogPath = Join-Path $ActiveDirectoryBasePath "Logs" - $activeDirectoryNtdsPath = Join-Path $ActiveDirectoryBasePath "NTDS" - $activeDirectorySysvolPath = Join-Path $ActiveDirectoryBasePath "SYSVOL" - $artifactsBlobNames = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($ArtifactsBlobNamesB64)) | ConvertFrom-Json - $artifactsBlobSasToken = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($ArtifactsBlobSasTokenB64)) - $domainAdministratorCredentials = New-Object System.Management.Automation.PSCredential ("${DomainFqdn}\$($AdministratorCredentials.UserName)", $AdministratorCredentials.Password) - $domainOus = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($DomainOusB64)) | ConvertFrom-Json - $ouNames = $domainOus.PSObject.Members | Where-Object { $_.TypeNameOfValue -eq "System.Management.Automation.PSCustomObject" } | ForEach-Object { $_.Value.name } - $securityGroups = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($DomainSecurityGroupsB64)) | ConvertFrom-Json - $securityGroupsHashtable = $securityGroups | ForEach-Object { $_.PSObject.Members } | Where-Object { $_.TypeNameOfValue -eq "System.Management.Automation.PSCustomObject" } | ForEach-Object { @{ "key" = $_.Name; "name" = $_.Value.name; "dn" = "OU=$($domainOus.securityGroups.name),${DomainDn}" } } - $userAccountsHashtable = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($UserAccountsB64)) | ConvertFrom-Json | ForEach-Object { $_.PSObject.Members } | Where-Object { $_.TypeNameOfValue -eq "System.Management.Automation.PSCustomObject" } | ForEach-Object { @{ "key" = $_.Name; "name" = $_.Value.name; "groupOu" = $domainOus."$($_.Name)".path; "credentials" = (New-Object System.Management.Automation.PSCredential ($_.Value.samAccountName, (ConvertTo-SecureString $_.Value.password -AsPlainText -Force))) } } - - Node localhost { - InstallPowershellModules InstallPowershellModules {} - - CreatePrimaryDomainController CreatePrimaryDomainController { - ActiveDirectoryLogPath = $activeDirectoryLogPath - ActiveDirectoryNtdsPath = $activeDirectoryNtdsPath - 
ActiveDirectorySysvolPath = $activeDirectorySysvolPath - DomainAdministratorCredentials = $domainAdministratorCredentials - DomainFqdn = $DomainFqdn - DomainNetBiosName = $DomainNetBiosName - SafeModeCredentials = $SafeModeCredentials - } - - UploadArtifacts UploadArtifacts { - BlobNames = $artifactsBlobNames - BlobSasToken = $artifactsBlobSasToken - StorageAccountName = $ArtifactsStorageAccountName - StorageContainerName = $ArtifactsStorageContainerName - ArtifactsDirectory = $ArtifactsTargetDirectory - DependsOn = "[CreatePrimaryDomainController]CreatePrimaryDomainController" - } - - ConfigureActiveDirectory ConfigureActiveDirectory { - DomainAdministratorCredentials = $domainAdministratorCredentials - DomainAdminUsername = $AdministratorCredentials.UserName - DomainFqdn = $DomainFqdn - DomainDn = $DomainDn - DomainNetBiosName = $DomainNetBiosName - OuNames = $ouNames - SecurityGroups = $securityGroupsHashtable - ServiceAccountsOuDn = "OU=$($domainOus.serviceAccounts.name),${DomainDn}" - UserAccounts = $userAccountsHashtable - DependsOn = @("[CreatePrimaryDomainController]CreatePrimaryDomainController", "[UploadArtifacts]UploadArtifacts") - } - - ApplyGroupPolicies ApplyGroupPolicies { - ActiveDirectorySysvolPath = $activeDirectorySysvolPath - ArtifactsDirectory = $ArtifactsTargetDirectory - DomainFqdn = $DomainFqdn - DomainDn = $DomainDn - OuNameDatabaseServers = $domainOus.databaseServers.name - OuNameIdentityServers = $domainOus.identityServers.name - OuNameLinuxServers = $domainOus.linuxServers.name - OuNameResearchUsers = $domainOus.researchUsers.name - OuNameSecurityGroups = $domainOus.securityGroups.name - OuNameServiceAccounts = $domainOus.serviceAccounts.name - ServerAdminSgName = $securityGroups.serverAdmins.name - DependsOn = @("[UploadArtifacts]UploadArtifacts", "[ConfigureActiveDirectory]ConfigureActiveDirectory") - } - - ConfigureDns ConfigureDns { - ExternalDnsResolver = $ExternalDnsResolver - IdentitySubnetCidr = $IdentitySubnetCidr - } - } -} diff --git a/deployment/safe_haven_management_environment/desired_state_configuration/DC2Bootstrap.ps1 b/deployment/safe_haven_management_environment/desired_state_configuration/DC2Bootstrap.ps1 deleted file mode 100644 index 9d45116e88..0000000000 --- a/deployment/safe_haven_management_environment/desired_state_configuration/DC2Bootstrap.ps1 +++ /dev/null @@ -1,11 +0,0 @@ -# Here we install -# - NuGet (for module management) -# - PowerShellModule (to allow modules to be installed in DSC) -# - various x* modules (to enable DSC functions) -# Other Powershell modules should be installed through DSC -Install-PackageProvider -Name NuGet -MinimumVersion 2.8.5.201 -Force; -Install-Module PowerShellModule -MinimumVersion 0.3 -Force; -Install-Module ActiveDirectoryDsc -MinimumVersion 6.2.0 -Force; -Install-Module ComputerManagementDsc -MinimumVersion 8.5.0 -Force; -Install-Module DnsServerDsc -MinimumVersion 3.0.0 -Force; -Install-Module NetworkingDsc -MinimumVersion 9.0.0 -Force; diff --git a/deployment/safe_haven_management_environment/desired_state_configuration/DC2DesiredState.ps1 b/deployment/safe_haven_management_environment/desired_state_configuration/DC2DesiredState.ps1 deleted file mode 100755 index b9ab85bfcb..0000000000 --- a/deployment/safe_haven_management_environment/desired_state_configuration/DC2DesiredState.ps1 +++ /dev/null @@ -1,178 +0,0 @@ -configuration InstallPowershellModules { - Import-DscResource -ModuleName PowerShellModule - - Node localhost { - PSModuleResource MSOnline { - Ensure = "present" - Module_Name = 
"MSOnline" - } - - PSModuleResource PackageManagement { - Ensure = "present" - Module_Name = "PackageManagement" - } - - PSModuleResource PowerShellGet { - Ensure = "present" - Module_Name = "PowerShellGet" - } - - PSModuleResource PSWindowsUpdate { - Ensure = "present" - Module_Name = "PSWindowsUpdate" - } - } -} - - -configuration CreateSecondaryDomainController { - param ( - [Parameter(HelpMessage = "Path to Active Directory log volume")] - [ValidateNotNullOrEmpty()] - [string]$ActiveDirectoryLogPath, - - [Parameter(HelpMessage = "Path to Active Directory NTDS volume")] - [ValidateNotNullOrEmpty()] - [string]$ActiveDirectoryNtdsPath, - - [Parameter(HelpMessage = "Path to Active Directory system volume")] - [ValidateNotNullOrEmpty()] - [string]$ActiveDirectorySysvolPath, - - [Parameter(Mandatory = $true, HelpMessage = "Domain administrator credentials")] - [ValidateNotNullOrEmpty()] - [System.Management.Automation.PSCredential]$DomainAdministratorCredentials, - - [Parameter(Mandatory = $true, HelpMessage = "FQDN for the SHM domain")] - [ValidateNotNullOrEmpty()] - [String]$DomainFqdn, - - [Parameter(Mandatory, HelpMessage = "Private IP address of primary domain controller")] - [ValidateNotNullOrEmpty()] - [String]$PrimaryDomainControllerIp, - - [Parameter(Mandatory = $true, HelpMessage = "VM administrator safe mode credentials")] - [ValidateNotNullOrEmpty()] - [System.Management.Automation.PSCredential]$SafeModeCredentials - ) - - Import-DscResource -ModuleName PSDesiredStateConfiguration - Import-DscResource -ModuleName ActiveDirectoryDsc - Import-DscResource -ModuleName ComputerManagementDsc - Import-DscResource -ModuleName NetworkingDsc - - # Construct variables for use in DSC modules - $Interface = Get-NetAdapter | Where-Object { $_.Name -Like "Ethernet*" } | Select-Object -First 1 - - Node localhost { - LocalConfigurationManager { - ActionAfterReboot = "ContinueConfiguration" - ConfigurationMode = "ApplyOnly" - RebootNodeIfNeeded = $true - } - - WindowsFeature ADDomainServices { - Ensure = "Present" - Name = "AD-Domain-Services" - } - - WindowsFeature ADDSTools { - Ensure = "Present" - Name = "RSAT-ADDS-Tools" - } - - WindowsFeature ADAdminCenter { - Ensure = "Present" - Name = "RSAT-AD-AdminCenter" - } - - WindowsFeature ADPowerShell { - Ensure = "Present" - Name = "RSAT-AD-PowerShell" - } - - DnsServerAddress DnsServerAddress { # from NetworkingDsc - Address = $PrimaryDomainControllerIp - AddressFamily = "IPv4" - InterfaceAlias = $Interface.Name - } - - WaitForADDomain WaitForestAvailability { # from ActiveDirectoryDsc - Credential = $DomainAdministratorCredentials - DomainName = $DomainFqdn - DependsOn = @("[WindowsFeature]ADPowerShell", "[WindowsFeature]ADDomainServices", "[DnsServerAddress]DnsServerAddress") - } - - ADDomainController SecondaryDomainController { # from ActiveDirectoryDsc - Credential = $DomainAdministratorCredentials - DatabasePath = $ActiveDirectoryNtdsPath - DomainName = $DomainFqdn - LogPath = $ActiveDirectoryLogPath - SafeModeAdministratorPassword = $SafeModeCredentials - SysvolPath = $ActiveDirectorySysvolPath - DependsOn = "[WaitForADDomain]WaitForestAvailability" - } - - WindowsFeature DNS { # Promotion to SecondaryDomainController should have already enabled this but we ensure it here - Ensure = "Present" - Name = "DNS" - DependsOn = "[ADDomainController]SecondaryDomainController" - } - - WindowsFeature DnsServer { # Promotion to SecondaryDomainController should have already enabled this but we ensure it here - Ensure = "Present" - Name = 
"RSAT-DNS-Server" - DependsOn = "[WindowsFeature]DNS" - } - - PendingReboot RebootAfterPromotion { # from ComputerManagementDsc - Name = "RebootAfterDCPromotion" - DependsOn = "[WindowsFeature]DnsServer" - } - } -} - - -configuration ConfigureSecondaryDomainController { - param ( - [Parameter(Mandatory = $true, HelpMessage = "Active Directory base path")] - [ValidateNotNullOrEmpty()] - [string]$ActiveDirectoryBasePath, - - [Parameter(Mandatory = $true, HelpMessage = "VM administrator credentials")] - [ValidateNotNullOrEmpty()] - [System.Management.Automation.PSCredential]$AdministratorCredentials, - - [Parameter(Mandatory = $true, HelpMessage = "FQDN for the SHM domain")] - [ValidateNotNullOrEmpty()] - [String]$DomainFqdn, - - [Parameter(Mandatory, HelpMessage = "Private IP address of primary domain controller")] - [ValidateNotNullOrEmpty()] - [String]$PrimaryDomainControllerIp, - - [Parameter(Mandatory = $true, HelpMessage = "VM administrator safe mode credentials")] - [ValidateNotNullOrEmpty()] - [System.Management.Automation.PSCredential]$SafeModeCredentials - ) - - # Construct variables for passing to DSC configurations - $activeDirectoryLogPath = Join-Path $ActiveDirectoryBasePath "Logs" - $activeDirectoryNtdsPath = Join-Path $ActiveDirectoryBasePath "NTDS" - $activeDirectorySysvolPath = Join-Path $ActiveDirectoryBasePath "SYSVOL" - $domainAdministratorCredentials = New-Object System.Management.Automation.PSCredential ("${DomainFqdn}\$($AdministratorCredentials.UserName)", $AdministratorCredentials.Password) - - Node localhost { - InstallPowershellModules InstallPowershellModules {} - - CreateSecondaryDomainController CreateSecondaryDomainController { - ActiveDirectoryLogPath = $activeDirectoryLogPath - ActiveDirectoryNtdsPath = $activeDirectoryNtdsPath - ActiveDirectorySysvolPath = $activeDirectorySysvolPath - DomainAdministratorCredentials = $domainAdministratorCredentials - DomainFqdn = $DomainFqdn - PrimaryDomainControllerIp = $PrimaryDomainControllerIp - SafeModeCredentials = $SafeModeCredentials - } - } -} diff --git a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/CreateUsers.ps1 b/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/CreateUsers.ps1 deleted file mode 100644 index a99dc67335..0000000000 --- a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/CreateUsers.ps1 +++ /dev/null @@ -1,59 +0,0 @@ -param ( - [Parameter(Mandatory = $true, HelpMessage = "Path to the CSV file of users")] - [ValidateNotNullOrEmpty()] - [string]$userFilePath -) - -$domain = (Get-ADForest -Current LocalComputer).Domains -$userOuPath = (Get-ADObject -Filter * | Where-Object { $_.Name -eq "Safe Haven Research Users" }).DistinguishedName - -Add-Type -AssemblyName System.Web -$Description = "Research User" - -Import-Csv $userFilePath | ForEach-Object { - $UserPrincipalName = "$($_.SamAccountName)@${domain}" - $DisplayName = "$($_.GivenName) $($_.Surname)" - $password = [System.Web.Security.Membership]::GeneratePassword(12, 3) - $props = @{ - SamAccountName = $_.SamAccountName - UserPrincipalName = $UserPrincipalName - Name = "$DisplayName" - DisplayName = "$DisplayName" - GivenName = $_.GivenName - SurName = $_.Surname - Department = $Department - Description = $Description - Path = "$userOuPath" - Enabled = $True - AccountPassword = (ConvertTo-SecureString $Password -AsPlainText -Force) - PasswordNeverExpires = $True - Mobile = $_.Mobile - Email = $_.SecondaryEmail - Country = "GB" - } - - # 
Create user and add them to the group if requested - try { - $null = New-ADUser @props -PassThru - if ($_.GroupName) { - foreach ($group in $($_.GroupName.Split("|"))) { - Write-Output "Adding user to group '$group'" - Add-ADGroupMember "$group" $props.SamAccountName - } - } - Write-Output "Created a user with UserPrincipalName '$UserPrincipalName'" - } catch [Microsoft.ActiveDirectory.Management.ADIdentityAlreadyExistsException] { - Write-Output "User with UserPrincipalName '$UserPrincipalName' already exists" - } -} - -# Force sync with AzureAD. It will still take around 5 minutes for changes to propagate -Write-Output "Synchronising local Active Directory with Azure" -try { - Import-Module -Name "C:\Program Files\Microsoft Azure AD Sync\Bin\ADSync" -ErrorAction Stop - Start-ADSyncSyncCycle -PolicyType Delta -} catch [System.IO.FileNotFoundException] { - Write-Output "Skipping as Azure AD Sync is not installed" -} catch { - Write-Output "Unable to run Azure Active Directory synchronisation!" -} diff --git a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/Disconnect_AD.mustache.ps1 b/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/Disconnect_AD.mustache.ps1 deleted file mode 100644 index 7048770076..0000000000 --- a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/Disconnect_AD.mustache.ps1 +++ /dev/null @@ -1,35 +0,0 @@ -# NB. This solves the issue of orphaned AAD users when the local AD is deleted -# https://support.microsoft.com/en-gb/help/2619062/you-can-t-manage-or-remove-objects-that-were-synchronized-through-the - -# Ensure that MSOnline is installed for current user -if (-Not (Get-Module -ListAvailable -Name MSOnline)) { - [Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 - Install-Module -Name MSOnline -Force -} - -if (Get-Module -ListAvailable -Name MSOnline) { - Write-Output "Please use the username and password for an Azure AD global admin. Don't forget the @{{domain.fqdn}} on the end of the username!" - Connect-MsolService - # Print the current synchronisation status - if ((Get-MSOLCompanyInformation).DirectorySynchronizationEnabled) { - Write-Output "[ ] Directory synchronisation is ENABLED" - Write-Output "Removing synchronised users..." - Get-MsolUser -Synchronized | Remove-MsolUser -Force - Write-Output "Disabling directory synchronisation..." - Set-MsolDirSyncEnabled -EnableDirSync $False -Force - # Print the current synchronisation status - if ((Get-MSOLCompanyInformation).DirectorySynchronizationEnabled) { - Write-Output "[x] Directory synchronisation is still ENABLED" - } else { - Write-Output "[o] Directory synchronisation is now DISABLED" - } - } else { - Write-Output "[o] Directory synchronisation is already DISABLED" - } - # Remove user-added service principals except the MFA service principal - Write-Output "Removing any user-added service principals..." - $nServicePrincipalsBefore = (Get-MsolServicePrincipal | Measure-Object).Count - Get-MsolServicePrincipal | Where-Object { $_.AppPrincipalId -ne "981f26a1-7f43-403b-a875-f8b09b8cd720" } | Remove-MsolServicePrincipal 2>&1 | Out-Null - $nServicePrincipalsAfter = (Get-MsolServicePrincipal | Measure-Object).Count - Write-Output "[o] Removed $($nServicePrincipalsBefore - $nServicePrincipalsAfter) service principal(s).
There are $nServicePrincipalsAfter remaining" -} diff --git a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/GPOs.zip b/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/GPOs.zip deleted file mode 100644 index 335f1080df..0000000000 Binary files a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/GPOs.zip and /dev/null differ diff --git a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/Run_ADSync.ps1 b/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/Run_ADSync.ps1 deleted file mode 100644 index 205a0e2ac7..0000000000 --- a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/Run_ADSync.ps1 +++ /dev/null @@ -1,9 +0,0 @@ -param( - [Parameter(Position = 0, HelpMessage = "Type of synchronisation ('Initial' or 'Delta')")] - [ValidateSet("Initial", "Delta")] - [String] - $sync = "Delta" -) - -Import-Module -Name "C:\Program Files\Microsoft Azure AD Sync\Bin\ADSync" -ErrorAction Stop -Start-ADSyncSyncCycle -PolicyType $sync diff --git a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/StartMenuLayoutModification.xml b/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/StartMenuLayoutModification.xml deleted file mode 100644 index cc29ae42db..0000000000 --- a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/StartMenuLayoutModification.xml +++ /dev/null @@ -1,17 +0,0 @@ - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/UpdateAADSyncRule.ps1 b/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/UpdateAADSyncRule.ps1 deleted file mode 100644 index 675981bba3..0000000000 --- a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/UpdateAADSyncRule.ps1 +++ /dev/null @@ -1,41 +0,0 @@ -Import-Module -Name "C:\Program Files\Microsoft Azure AD Sync\Bin\ADSync" -Force -ErrorAction Stop - -# Create a new rule that is a copy of the default rule -$defaultRule = Get-ADSyncRule | Where-Object { $_.Name -eq "Out to AAD - User Join" } -$newRule = New-ADSyncRule ` - -Name 'Out to AAD - User Join' ` - -Description $defaultRule.Description ` - -Direction 'Outbound' ` - -Precedence $defaultRule.Precedence ` - -PrecedenceAfter $defaultRule.PrecedenceAfter ` - -PrecedenceBefore $defaultRule.PrecedenceBefore ` - -SourceObjectType $defaultRule.SourceObjectType ` - -TargetObjectType $defaultRule.TargetObjectType ` - -Connector $defaultRule.Connector ` - -LinkType $defaultRule.LinkType ` - -SoftDeleteExpiryInterval $defaultRule.SoftDeleteExpiryInterval ` - -ImmutableTag '' ` - -EnablePasswordSync - -# Copy all flow mappings except the usage location one -foreach ($flow in ($defaultRule.AttributeFlowMappings | Where-Object { $_.Destination -ne "usageLocation" })) { - $params = @{ - Destination = $flow.Destination - FlowType = $flow.FlowType - ValueMergeType = $flow.ValueMergeType - } - if ($flow.Source) { $params["Source"] = $flow.Source } - if ($flow.Expression) { $params["Expression"] = $flow.Expression } - $null = Add-ADSyncAttributeFlowMapping -SynchronizationRule $newRule @params -} - -# Set the usage location flow mapping manually -$null = Add-ADSyncAttributeFlowMapping -SynchronizationRule $newRule -Source @('c') 
-Destination 'usageLocation' -FlowType 'Direct' -ValueMergeType 'Update' - -# Add appropriate scope and join conditions -$newRule.JoinFilter = $defaultRule.JoinFilter -$newRule.ScopeFilter = $defaultRule.ScopeFilter - -# Remove the old rule and add the new one -$null = Remove-ADSyncRule -SynchronizationRule $defaultRule -Add-ADSyncRule -SynchronizationRule $newRule diff --git a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{0AF343A0-248D-4CA5-B19E-5FA46DAE9F9C}/Backup.xml b/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{0AF343A0-248D-4CA5-B19E-5FA46DAE9F9C}/Backup.xml deleted file mode 100755 index 853f964812..0000000000 --- a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{0AF343A0-248D-4CA5-B19E-5FA46DAE9F9C}/Backup.xml +++ /dev/null @@ -1,20 +0,0 @@ - - 01 00 04 9c 00 00 00 00 00 00 00 00 00 00 00 00 14 00 00 00 04 00 ec 00 08 00 00 00 05 02 28 00 00 01 00 00 01 00 00 00 8f fd ac ed b3 ff d1 11 b4 1d 00 a0 c9 68 f9 39 01 01 00 00 00 00 00 05 0b 00 00 00 00 00 24 00 ff 00 0f 00 01 05 00 00 00 00 00 05 15 00 00 00 b2 02 19 6c 8a 79 30 07 b6 4d ff 37 00 02 00 00 00 02 24 00 ff 00 0f 00 01 05 00 00 00 00 00 05 15 00 00 00 b2 02 19 6c 8a 79 30 07 b6 4d ff 37 00 02 00 00 00 02 24 00 ff 00 0f 00 01 05 00 00 00 00 00 05 15 00 00 00 b2 02 19 6c 8a 79 30 07 b6 4d ff 37 07 02 00 00 00 02 14 00 94 00 02 00 01 01 00 00 00 00 00 05 09 00 00 00 00 02 14 00 94 00 02 00 01 01 00 00 00 00 00 05 0b 00 00 00 00 02 14 00 ff 00 0f 00 01 01 00 00 00 00 00 05 12 00 00 00 00 0a 14 00 ff 00 0f 00 01 01 00 00 00 00 00 03 00 00 00 00 - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{0AF343A0-248D-4CA5-B19E-5FA46DAE9F9C}/DomainSysvol/GPO/Machine/microsoft/windows nt/SecEdit/GptTmpl.inf b/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{0AF343A0-248D-4CA5-B19E-5FA46DAE9F9C}/DomainSysvol/GPO/Machine/microsoft/windows nt/SecEdit/GptTmpl.inf deleted file mode 100755 index 3f999896e2..0000000000 Binary files a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{0AF343A0-248D-4CA5-B19E-5FA46DAE9F9C}/DomainSysvol/GPO/Machine/microsoft/windows nt/SecEdit/GptTmpl.inf and /dev/null differ diff --git a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{0AF343A0-248D-4CA5-B19E-5FA46DAE9F9C}/bkupInfo.xml b/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{0AF343A0-248D-4CA5-B19E-5FA46DAE9F9C}/bkupInfo.xml deleted file mode 100755 index 2249e6fad1..0000000000 --- a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{0AF343A0-248D-4CA5-B19E-5FA46DAE9F9C}/bkupInfo.xml +++ /dev/null @@ -1 +0,0 @@ - diff --git a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{0AF343A0-248D-4CA5-B19E-5FA46DAE9F9C}/gpreport.xml b/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{0AF343A0-248D-4CA5-B19E-5FA46DAE9F9C}/gpreport.xml deleted file mode 100755 index c84eff7dea..0000000000 Binary files a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{0AF343A0-248D-4CA5-B19E-5FA46DAE9F9C}/gpreport.xml and /dev/null differ diff --git 
a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{742211F9-1482-4D06-A8DE-BA66101933EB}/Backup.xml b/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{742211F9-1482-4D06-A8DE-BA66101933EB}/Backup.xml deleted file mode 100755 index f024a3e051..0000000000 --- a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{742211F9-1482-4D06-A8DE-BA66101933EB}/Backup.xml +++ /dev/null @@ -1,20 +0,0 @@ - - 01 00 04 9c 00 00 00 00 00 00 00 00 00 00 00 00 14 00 00 00 04 00 ec 00 08 00 00 00 05 02 28 00 00 01 00 00 01 00 00 00 8f fd ac ed b3 ff d1 11 b4 1d 00 a0 c9 68 f9 39 01 01 00 00 00 00 00 05 0b 00 00 00 00 00 24 00 ff 00 0f 00 01 05 00 00 00 00 00 05 15 00 00 00 b2 02 19 6c 8a 79 30 07 b6 4d ff 37 00 02 00 00 00 02 24 00 ff 00 0f 00 01 05 00 00 00 00 00 05 15 00 00 00 b2 02 19 6c 8a 79 30 07 b6 4d ff 37 00 02 00 00 00 02 24 00 ff 00 0f 00 01 05 00 00 00 00 00 05 15 00 00 00 b2 02 19 6c 8a 79 30 07 b6 4d ff 37 07 02 00 00 00 02 14 00 94 00 02 00 01 01 00 00 00 00 00 05 09 00 00 00 00 02 14 00 94 00 02 00 01 01 00 00 00 00 00 05 0b 00 00 00 00 02 14 00 ff 00 0f 00 01 01 00 00 00 00 00 05 12 00 00 00 00 0a 14 00 ff 00 0f 00 01 01 00 00 00 00 00 03 00 00 00 00 - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{742211F9-1482-4D06-A8DE-BA66101933EB}/DomainSysvol/GPO/Machine/microsoft/windows nt/SecEdit/GptTmpl.inf b/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{742211F9-1482-4D06-A8DE-BA66101933EB}/DomainSysvol/GPO/Machine/microsoft/windows nt/SecEdit/GptTmpl.inf deleted file mode 100755 index 260c45235c..0000000000 Binary files a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{742211F9-1482-4D06-A8DE-BA66101933EB}/DomainSysvol/GPO/Machine/microsoft/windows nt/SecEdit/GptTmpl.inf and /dev/null differ diff --git a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{742211F9-1482-4D06-A8DE-BA66101933EB}/bkupInfo.xml b/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{742211F9-1482-4D06-A8DE-BA66101933EB}/bkupInfo.xml deleted file mode 100755 index 2956bdb735..0000000000 --- a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{742211F9-1482-4D06-A8DE-BA66101933EB}/bkupInfo.xml +++ /dev/null @@ -1 +0,0 @@ - diff --git a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{742211F9-1482-4D06-A8DE-BA66101933EB}/gpreport.xml b/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{742211F9-1482-4D06-A8DE-BA66101933EB}/gpreport.xml deleted file mode 100755 index 1af8d56c4f..0000000000 Binary files a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{742211F9-1482-4D06-A8DE-BA66101933EB}/gpreport.xml and /dev/null differ diff --git a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{EE9EF278-1F3F-461C-9F7A-97F2B82C04B4}/Backup.xml b/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{EE9EF278-1F3F-461C-9F7A-97F2B82C04B4}/Backup.xml deleted file mode 100755 index 8a63186e82..0000000000 --- 
a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{EE9EF278-1F3F-461C-9F7A-97F2B82C04B4}/Backup.xml +++ /dev/null @@ -1,18 +0,0 @@ - - 01 00 04 9c 00 00 00 00 00 00 00 00 00 00 00 00 14 00 00 00 04 00 ec 00 08 00 00 00 05 02 28 00 00 01 00 00 01 00 00 00 8f fd ac ed b3 ff d1 11 b4 1d 00 a0 c9 68 f9 39 01 01 00 00 00 00 00 05 0b 00 00 00 00 00 24 00 ff 00 0f 00 01 05 00 00 00 00 00 05 15 00 00 00 d9 8f a5 5d d2 4c c6 98 af 40 be 5b 00 02 00 00 00 02 24 00 ff 00 0f 00 01 05 00 00 00 00 00 05 15 00 00 00 d9 8f a5 5d d2 4c c6 98 af 40 be 5b 00 02 00 00 00 02 24 00 ff 00 0f 00 01 05 00 00 00 00 00 05 15 00 00 00 d9 8f a5 5d d2 4c c6 98 af 40 be 5b 07 02 00 00 00 02 14 00 94 00 02 00 01 01 00 00 00 00 00 05 09 00 00 00 00 02 14 00 94 00 02 00 01 01 00 00 00 00 00 05 0b 00 00 00 00 02 14 00 ff 00 0f 00 01 01 00 00 00 00 00 05 12 00 00 00 00 0a 14 00 ff 00 0f 00 01 01 00 00 00 00 00 03 00 00 00 00 - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{EE9EF278-1F3F-461C-9F7A-97F2B82C04B4}/DomainSysvol/GPO/Machine/comment.cmtx b/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{EE9EF278-1F3F-461C-9F7A-97F2B82C04B4}/DomainSysvol/GPO/Machine/comment.cmtx deleted file mode 100755 index 744ba567c9..0000000000 --- a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{EE9EF278-1F3F-461C-9F7A-97F2B82C04B4}/DomainSysvol/GPO/Machine/comment.cmtx +++ /dev/null @@ -1,12 +0,0 @@ - - - - - - - - - - - - \ No newline at end of file diff --git a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{EE9EF278-1F3F-461C-9F7A-97F2B82C04B4}/DomainSysvol/GPO/Machine/registry.pol b/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{EE9EF278-1F3F-461C-9F7A-97F2B82C04B4}/DomainSysvol/GPO/Machine/registry.pol deleted file mode 100755 index bd3cfbff94..0000000000 Binary files a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{EE9EF278-1F3F-461C-9F7A-97F2B82C04B4}/DomainSysvol/GPO/Machine/registry.pol and /dev/null differ diff --git a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{EE9EF278-1F3F-461C-9F7A-97F2B82C04B4}/bkupInfo.xml b/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{EE9EF278-1F3F-461C-9F7A-97F2B82C04B4}/bkupInfo.xml deleted file mode 100755 index 7cd52f871d..0000000000 --- a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{EE9EF278-1F3F-461C-9F7A-97F2B82C04B4}/bkupInfo.xml +++ /dev/null @@ -1 +0,0 @@ - diff --git a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{EE9EF278-1F3F-461C-9F7A-97F2B82C04B4}/gpreport.xml b/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{EE9EF278-1F3F-461C-9F7A-97F2B82C04B4}/gpreport.xml deleted file mode 100755 index 2d59cfedf6..0000000000 Binary files a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/source/{EE9EF278-1F3F-461C-9F7A-97F2B82C04B4}/gpreport.xml and /dev/null differ diff --git a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/user_details_template.csv 
b/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/user_details_template.csv deleted file mode 100755 index 2c0f55a0fe..0000000000 --- a/deployment/safe_haven_management_environment/desired_state_configuration/dc1Artifacts/user_details_template.csv +++ /dev/null @@ -1,2 +0,0 @@ -SamAccountName,GivenName,Surname,Mobile,SecondaryEmail,GroupName -TestUser1,Test,User1,+44 7700900000,Learner01@example.com,SG SANDBOX Research Users diff --git a/deployment/safe_haven_management_environment/network_rules/shm-firewall-rules.json b/deployment/safe_haven_management_environment/network_rules/shm-firewall-rules.json deleted file mode 100644 index 4ac6e1e8cb..0000000000 --- a/deployment/safe_haven_management_environment/network_rules/shm-firewall-rules.json +++ /dev/null @@ -1,361 +0,0 @@ -{ - "routes": [ - { - "name": "ViaFirewall", - "properties": { - "addressPrefix": "0.0.0.0/0", - "nextHop": "{{firewall.privateIpAddress}}" - } - }, - { - "name": "ViaVpn", - "properties": { - "addressPrefix": "{{network.vpn.cidr}}", - "nextHop": "VirtualNetworkGateway" - } - } - ], - "networkRuleCollections": [ - { - "name": "shm-{{id}}-allow", - "properties": { - "priority": 1000, - "action": { - "type": "Allow" - }, - "rules": [ - { - "name": "AllowExternalAzureAutomationOperations", - "protocols": [ - "TCP:*", - "UDP:*" - ], - "targetAddresses": [ - "GuestAndHybridManagement", - ], - "sourceAddresses": [ - "*" - ] - }, - { - "name": "AllowExternalNTP", - "protocols": [ - "UDP:123" - ], - "targetAddresses": [ - {{#time.ntp.serverAddresses}} - "{{.}}", - {{/time.ntp.serverAddresses}} - ], - "sourceAddresses": [ - "*" - ] - }, - { - "name": "AllowExternalDnsResolver", - "protocols": [ - "TCP:53", - "UDP:53" - ], - "targetAddresses": [ - "{{dc.external_dns_resolver}}" - ], - "sourceAddresses": [ - "{{network.vnet.subnets.identity.cidr}}" - ] - }, - { - "name": "AllowExternalPackageRSyncCRAN", - "protocols": [ - "TCP:873" - ], - "targetFqdns": [ - "cran.r-project.org" - ], - "sourceAddresses": [ - "{{network.vnetRepositoriesTier2.subnets.mirrorsExternal.cidr}}", - "{{network.vnetRepositoriesTier3.subnets.mirrorsExternal.cidr}}", - ] - }, - ] - } - } - ], - "applicationRuleCollections": [ - { - "name": "shm-{{id}}-allow", - "properties": { - "priority": 1000, - "action": { - "type": "Allow" - }, - "rules": [ - { - "name": "AllowExternalAzureADConnectOperations", - "protocols": [ - "HTTPS:443" - ], - "fqdnTags": [], - "targetFqdns": [ - "*.blob.core.windows.net", - "*.servicebus.windows.net", - "aadconnecthealth.azure.com", - "adminwebservice.microsoftonline.com", - "s1.adhybridhealth.azure.com", - "umwatson.events.data.microsoft.com", - "v10.events.data.microsoft.com", - "v20.events.data.microsoft.com" - ], - "sourceAddresses": [ - "{{network.vnet.subnets.identity.cidr}}" - ] - }, - { - "name": "AllowExternalAzureADConnectPasswordReset", - "protocols": [ - "HTTPS:443" - ], - "fqdnTags": [], - "targetFqdns": [ - "*-sb.servicebus.windows.net", - "*.servicebus.windows.net", - "passwordreset.microsoftonline.com" - ], - "sourceAddresses": [ - "{{network.vnet.subnets.identity.cidr}}" - ] - }, - { - "name": "AllowExternalAzureADConnectSetup", - "protocols": [ - "HTTPS:443" - ], - "fqdnTags": [], - "targetFqdns": [ - "s1.adhybridhealth.azure.com", - "management.azure.com", - "policykeyservice.dc.ad.msft.net", - "www.office.com" - ], - "sourceAddresses": [ - "{{network.vnet.subnets.identity.cidr}}" - ] - }, - { - "name": "AllowExternalAzureADLogin", - "protocols": [ - "HTTPS:443" - ], - 
"fqdnTags": [], - "targetFqdns": [ - "aadcdn.msftauth.net", - "login.live.com", - "login.microsoftonline.com", - "login.windows.net", - "secure.aadcdn.microsoftonline-p.com" - ], - "sourceAddresses": [ - "{{network.vnet.subnets.identity.cidr}}" - ] - }, - { - "name": "AllowExternalAzureMFAConnectOperations", - "protocols": [ - "HTTPS:443" - ], - "fqdnTags": [], - "targetFqdns": [ - "css.phonefactor.net", - "pfd.phonefactor.net", - "pfd2.phonefactor.net" - ], - "sourceAddresses": [ - "{{network.vnet.subnets.identity.cidr}}" - ] - }, - { - "name": "AllowExternalAzureMFAConnectSetup", - "protocols": [ - "HTTPS:443" - ], - "fqdnTags": [], - "targetFqdns": [ - "adnotifications.windowsazure.com", - "credentials.azure.com", - "strongauthenticationservice.auth.microsoft.com" - ], - "sourceAddresses": [ - "{{network.vnet.subnets.identity.cidr}}" - ] - }, - { - "name": "AllowExternalCertificateStatusCheck", - "protocols": [ - "HTTP:80" - ], - "fqdnTags": [], - "targetFqdns": [ - "crl.microsoft.com", - "crl3.digicert.com", - "crl4.digicert.com", - "ocsp.digicert.com" - ], - "sourceAddresses": [ - "*" - ] - }, - { - "name": "AllowExternalGoogleNTP", - "protocols": [ - "HTTP:123" - ], - "fqdnTags": [], - "targetFqdns": [ - {{#time.ntp.serverFqdns}} - "{{.}}", - {{/time.ntp.serverFqdns}} - ], - "sourceAddresses": [ - "*" - ] - }, - { - "name": "AllowExternalLinuxUpdate", - "protocols": [ - "HTTP:80", - "HTTPS:443" - ], - "fqdnTags": [], - "targetFqdns": [ - "apt.postgresql.org", - "archive.ubuntu.com", - "azure.archive.ubuntu.com", - "changelogs.ubuntu.com", - "d20rj4el6vkp4c.cloudfront.net", - "dbeaver.io", - "developer.download.nvidia.com", - "packages.gitlab.com", - "packages.microsoft.com", - "qgis.org", - "security.ubuntu.com", - "ubuntu.qgis.org" - ], - "sourceAddresses": [ - "{{network.vnet.subnets.updateServers.cidr}}" - ] - }, - { - "name": "AllowExternalLinuxClamAVUpdate", - "protocols": [ - "HTTP:80", - "HTTPS:443" - ], - "fqdnTags": [], - "targetFqdns": [ - "current.cvd.clamav.net", - "database.clamav.net.cdn.cloudflare.net", - "database.clamav.net" - ], - "sourceAddresses": [ - "*" - ] - }, - { - "name": "AllowExternalPackageDownloadCRAN", - "protocols": [ - "HTTPS:443" - ], - "fqdnTags": [], - "targetFqdns": [ - "cran.r-project.org" - ], - "sourceAddresses": [ - "{{network.vnetRepositoriesTier2.subnets.mirrorsExternal.cidr}}", - "{{network.vnetRepositoriesTier2.subnets.proxies.cidr}}", - "{{network.vnetRepositoriesTier3.subnets.mirrorsExternal.cidr}}", - "{{network.vnetRepositoriesTier3.subnets.proxies.cidr}}" - ] - }, - { - "name": "AllowExternalPackageDownloadPyPI", - "protocols": [ - "HTTPS:443" - ], - "fqdnTags": [], - "targetFqdns": [ - "files.pythonhosted.org", - "pypi.org" - ], - "sourceAddresses": [ - "{{network.vnetRepositoriesTier2.subnets.mirrorsExternal.cidr}}", - "{{network.vnetRepositoriesTier2.subnets.proxies.cidr}}", - "{{network.vnetRepositoriesTier3.subnets.mirrorsExternal.cidr}}", - "{{network.vnetRepositoriesTier3.subnets.proxies.cidr}}" - ] - }, - { - "name": "AllowExternalPowershellModuleInstallation", - "protocols": [ - "HTTPS:443" - ], - "fqdnTags": [], - "targetFqdns": [ - "psg-prod-eastus.azureedge.net", - "www.powershellgallery.com" - ], - "sourceAddresses": [ - "{{network.vnet.subnets.identity.cidr}}" - ] - }, - { - "name": "AllowExternalPowershellModuleMSOnlineConnections", - "protocols": [ - "HTTPS:443" - ], - "fqdnTags": [], - "targetFqdns": [ - "provisioningapi.microsoftonline.com" - ], - "sourceAddresses": [ - "{{network.vnet.subnets.identity.cidr}}" - ] - 
}, - { - "name": "AllowExternalWindowsUpdate", - "protocols": [ - "HTTP:80", - "HTTPS:443" - ], - "fqdnTags": [], - "targetFqdns": [ - "au.download.windowsupdate.com", - "{{storage.artifacts.accountName}}.blob.core.windows.net", - "ctldl.windowsupdate.com", - "download.microsoft.com", - "download.windowsupdate.com", - "fe2cr.update.microsoft.com", - "fe3cr.delivery.mp.microsoft.com", - "geo-prod.do.dsp.mp.microsoft.com", - "go.microsoft.com", - "ntservicepack.microsoft.com", - "onegetcdn.azureedge.net", - "settings-win.data.microsoft.com", - "slscr.update.microsoft.com", - "test.stats.update.microsoft.com", - "tlu.dl.delivery.mp.microsoft.com", - "umwatson.events.data.microsoft.com", - "v10.events.data.microsoft.com", - "v10.vortex-win.data.microsoft.com", - "v20.events.data.microsoft.com", - "windowsupdate.microsoft.com" - ], - "sourceAddresses": [ - "*" - ] - } - ] - } - } - ], - "natRuleCollections": [] -} diff --git a/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-identity.json b/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-identity.json deleted file mode 100644 index 9845648249..0000000000 --- a/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-identity.json +++ /dev/null @@ -1,238 +0,0 @@ -[ - { - "name": "AllowDomainControllerInternalUdpInbound", - "access": "Allow", - "description": "Allow domain controller internal services over UDP (see https://bit.ly/3ggW9fm for details).", - "destinationAddressPrefix": "{{network.vnet.subnets.identity.cidr}}", - "destinationPortRange": ["137", "138"], - "direction": "Inbound", - "priority": 100, - "protocol": "UDP", - "sourceAddressPrefix": "{{network.vnet.subnets.identity.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowDomainControllerInternalTcpInbound", - "access": "Allow", - "description": "Allow domain controller internal services over TCP (see https://bit.ly/3ggW9fm for details).", - "destinationAddressPrefix": "{{network.vnet.subnets.identity.cidr}}", - "destinationPortRange": ["139", "445", "9389", "49152-65535"], - "direction": "Inbound", - "priority": 200, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnet.subnets.identity.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowDomainJoinedClientsUdpInbound", - "access": "Allow", - "description": "Allow domain-joined client requests over UDP: Kerberos; LDAP.", - "destinationAddressPrefix": "{{network.vnet.subnets.identity.cidr}}", - "destinationPortRange": ["88", "389"], - "direction": "Inbound", - "priority": 1000, - "protocol": "UDP", - "sourceAddressPrefix": "VirtualNetwork", - "sourcePortRange": "*" - }, - { - "name": "AllowDomainJoinedClientsTcpInbound", - "access": "Allow", - "description": "Allow domain-joined client requests over TCP: (see https://devopstales.github.io/linux/pfsense-ad-join/ for details).", - "destinationAddressPrefix": "{{network.vnet.subnets.identity.cidr}}", - "destinationPortRange": ["88", "135", "139", "389", "445", "464", "636", "3268", "3269", "49152-65535"], - "direction": "Inbound", - "priority": 1100, - "protocol": "TCP", - "sourceAddressPrefix": "VirtualNetwork", - "sourcePortRange": "*" - }, - { - "name": "AllowDNSInbound", - "access": "Allow", - "description": "Allow DNS requests", - "destinationAddressPrefix": "{{network.vnet.subnets.identity.cidr}}", - "destinationPortRange": "53", - "direction": "Inbound", - "priority": 1200, - "protocol": "*", - "sourceAddressPrefix": "VirtualNetwork", - "sourcePortRange": "*" - }, - { - "name": 
"AllowAdminVPNInbound", - "access": "Allow", - "description": "Allow RDP connection to servers from admin P2S VPN", - "destinationAddressPrefix": "{{network.vnet.subnets.identity.cidr}}", - "destinationPortRange": "3389", - "direction": "Inbound", - "priority": 2000, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vpn.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherInbound", - "access": "Deny", - "description": "Deny all other inbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - }, - { - "name": "AllowDomainControllerInternalUdpOutbound", - "access": "Allow", - "description": "Allow domain controller internal services over UDP: (see https://bit.ly/3ggW9fm for details).", - "destinationAddressPrefix": "{{network.vnet.subnets.identity.cidr}}", - "destinationPortRange": ["137", "138"], - "direction": "Outbound", - "priority": 100, - "protocol": "UDP", - "sourceAddressPrefix": "{{network.vnet.subnets.identity.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowDomainControllerInternalTcpOutbound", - "access": "Allow", - "description": "Allow domain controller internal services over TCP: (see https://bit.ly/3ggW9fm for details).", - "destinationAddressPrefix": "{{network.vnet.subnets.identity.cidr}}", - "destinationPortRange": ["139", "445", "9389", "49152-65535"], - "direction": "Outbound", - "priority": 200, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnet.subnets.identity.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowDomainJoinedClientsUdpOutbound", - "access": "Allow", - "description": "Allow domain-joined client requests over UDP: Kerberos; LDAP.", - "destinationAddressPrefix": "{{network.vnet.subnets.identity.cidr}}", - "destinationPortRange": ["88", "389"], - "direction": "Outbound", - "priority": 1000, - "protocol": "UDP", - "sourceAddressPrefix": "{{network.vnet.subnets.identity.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowDomainJoinedClientsTcpOutbound", - "access": "Allow", - "description": "Allow domain-joined client requests over TCP: (see https://devopstales.github.io/linux/pfsense-ad-join/ for details).", - "destinationAddressPrefix": "{{network.vnet.subnets.identity.cidr}}", - "destinationPortRange": ["88", "135", "139", "389", "445", "464", "636", "3268", "3269", "49152-65535"], - "direction": "Outbound", - "priority": 1100, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnet.subnets.identity.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowDNSOutbound", - "access": "Allow", - "description": "Allow domain controller internal DNS requests", - "destinationAddressPrefix": "{{network.vnet.subnets.identity.cidr}}", - "destinationPortRange": "53", - "direction": "Outbound", - "priority": 1200, - "protocol": "*", - "sourceAddressPrefix": "{{network.vnet.subnets.identity.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowMonitoringToolsOutbound", - "access": "Allow", - "description": "Allow connections to local monitoring tools", - "destinationAddressPrefix": "{{network.vnet.subnets.monitoring.cidr}}", - "destinationPortRange": "443", - "direction": "Outbound", - "priority": 1500, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnet.subnets.identity.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalNTPOutbound", - "access": "Allow", - "description": "Allow outbound connections to external NTP servers", - 
"destinationAddressPrefix": [ - {{#time.ntp.serverAddresses}} - "{{.}}", - {{/time.ntp.serverAddresses}} - ], - "destinationPortRange": "123", - "direction": "Outbound", - "priority": 3000, - "protocol": "UDP", - "sourceAddressPrefix": "{{network.vnet.subnets.identity.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalAzureStorageOutbound", - "access": "Allow", - "description": "Allow outbound connections to Azure storage", - "destinationAddressPrefix": "Storage", - "destinationPortRange": ["80", "443"], - "direction": "Outbound", - "priority": 3500, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnet.subnets.identity.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowWindowsUpdatesOutbound", - "access": "Allow", - "description": "Allow outbound connections to Windows update servers", - "destinationAddressPrefix": [ - {{#monitoring.updateServers.externalIpAddresses.windows}} - "{{.}}", - {{/monitoring.updateServers.externalIpAddresses.windows}} - ], - "destinationPortRange": ["80", "443"], - "direction": "Outbound", - "priority": 3700, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnet.subnets.identity.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowAzureAutomationOutbound", - "access": "Allow", - "description": "Allow outbound connections to Azure automation servers", - "destinationAddressPrefix": "GuestAndHybridManagement", - "destinationPortRange": ["443"], - "direction": "Outbound", - "priority": 3800, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnet.subnets.identity.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalInternetOutbound", - "access": "Allow", - "description": "Allow outbound connections to internet", - "destinationAddressPrefix": "Internet", - "destinationPortRange": "*", - "direction": "Outbound", - "priority": 3900, - "protocol": "*", - "sourceAddressPrefix": "{{network.vnet.subnets.identity.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherOutbound", - "access": "Deny", - "description": "Deny all other outbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Outbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - } -] diff --git a/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-monitoring.json b/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-monitoring.json deleted file mode 100644 index aefc2e2542..0000000000 --- a/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-monitoring.json +++ /dev/null @@ -1,38 +0,0 @@ -[ - { - "name": "AllowVirtualNetworkInbound", - "access": "Allow", - "description": "Allow inbound connections from the local virtual network", - "destinationAddressPrefix": "{{network.vnet.subnets.monitoring.cidr}}", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 1500, - "protocol": "*", - "sourceAddressPrefix": "VirtualNetwork", - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherInbound", - "access": "Deny", - "description": "Deny all other inbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherOutbound", - "access": "Deny", - "description": "Deny all other outbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Outbound", - "priority": 
4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - } -] diff --git a/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-repository-deployment-tier2.json b/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-repository-deployment-tier2.json deleted file mode 100644 index 86470cc09c..0000000000 --- a/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-repository-deployment-tier2.json +++ /dev/null @@ -1,74 +0,0 @@ -[ - { - "name": "DenyAdminVPNInbound", - "access": "Deny", - "description": "Deny connections from admin P2S VPN", - "destinationAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.deployment.cidr}}", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 2000, - "protocol": "*", - "sourceAddressPrefix": "{{network.vpn.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherInbound", - "access": "Deny", - "description": "Deny all other inbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - }, - { - "name": "AllowLinuxUpdatesOutbound", - "access": "Allow", - "description": "Allow outbound connections to Linux update servers", - "destinationAddressPrefix": "{{network.vnet.subnets.updateServers.cidr}}", - "destinationPortRange": ["8000"], - "direction": "Outbound", - "priority": 900, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.deployment.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalClamAVOutbound", - "access": "Allow", - "description": "Allow outbound connections to external ClamAV servers", - "destinationAddressPrefix": ["104.16.218.84", "104.16.219.84"], - "destinationPortRange": ["53", "443"], - "direction": "Outbound", - "priority": 3300, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.deployment.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalInternetOutbound", - "access": "Allow", - "description": "Allow outbound connections to internet", - "destinationAddressPrefix": "Internet", - "destinationPortRange": "*", - "direction": "Outbound", - "priority": 3900, - "protocol": "*", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.deployment.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherOutbound", - "access": "Deny", - "description": "Deny all other outbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Outbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - } -] diff --git a/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-repository-deployment-tier3.json b/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-repository-deployment-tier3.json deleted file mode 100644 index 71be79c068..0000000000 --- a/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-repository-deployment-tier3.json +++ /dev/null @@ -1,74 +0,0 @@ -[ - { - "name": "DenyAdminVPNInbound", - "access": "Deny", - "description": "Deny connections from admin P2S VPN", - "destinationAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.deployment.cidr}}", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 2000, - "protocol": "*", - "sourceAddressPrefix": "{{network.vpn.cidr}}", - "sourcePortRange": "*" - }, - { - 
"name": "DenyAllOtherInbound", - "access": "Deny", - "description": "Deny all other inbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - }, - { - "name": "AllowLinuxUpdatesOutbound", - "access": "Allow", - "description": "Allow outbound connections to Linux update servers", - "destinationAddressPrefix": "{{network.vnet.subnets.updateServers.cidr}}", - "destinationPortRange": ["8000"], - "direction": "Outbound", - "priority": 900, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.deployment.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalClamAVOutbound", - "access": "Allow", - "description": "Allow outbound connections to external ClamAV servers", - "destinationAddressPrefix": ["104.16.218.84", "104.16.219.84"], - "destinationPortRange": ["53", "443"], - "direction": "Outbound", - "priority": 3300, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.deployment.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalInternetOutbound", - "access": "Allow", - "description": "Allow outbound connections to internet", - "destinationAddressPrefix": "Internet", - "destinationPortRange": "*", - "direction": "Outbound", - "priority": 3900, - "protocol": "*", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.deployment.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherOutbound", - "access": "Deny", - "description": "Deny all other outbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Outbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - } -] diff --git a/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-repository-mirrors-external-tier2.json b/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-repository-mirrors-external-tier2.json deleted file mode 100644 index bec11da3ab..0000000000 --- a/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-repository-mirrors-external-tier2.json +++ /dev/null @@ -1,138 +0,0 @@ -[ - { - "name": "DenyAdminVPNInbound", - "access": "Deny", - "description": "Deny connections from admin P2S VPN", - "destinationAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.mirrorsExternal.cidr}}", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 2000, - "protocol": "*", - "sourceAddressPrefix": "{{network.vpn.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherInbound", - "access": "Deny", - "description": "Deny all other inbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - }, - { - "name": "AllowMirrorSynchronisationOutbound", - "access": "Allow", - "description": "Allow rsync over ports 22 and 873.", - "destinationAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.mirrorsInternal.cidr}}", - "destinationPortRange": ["22", "873"], - "direction": "Outbound", - "priority": 300, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.mirrorsExternal.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowLinuxUpdatesOutbound", - "access": "Allow", - "description": "Allow outbound connections to Linux update 
servers", - "destinationAddressPrefix": "{{network.vnet.subnets.updateServers.cidr}}", - "destinationPortRange": ["8000"], - "direction": "Outbound", - "priority": 900, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.mirrorsExternal.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowDNSOutbound", - "access": "Allow", - "description": "Allow DNS requests to SHM", - "destinationAddressPrefix": "{{network.vnet.subnets.identity.cidr}}", - "destinationPortRange": "53", - "direction": "Outbound", - "priority": 1200, - "protocol": "*", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.mirrorsExternal.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowMonitoringToolsOutbound", - "access": "Allow", - "description": "Allow connections to local monitoring tools", - "destinationAddressPrefix": "{{network.vnet.subnets.monitoring.cidr}}", - "destinationPortRange": "443", - "direction": "Outbound", - "priority": 1500, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.mirrorsExternal.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalNTPOutbound", - "access": "Allow", - "description": "Allow outbound connections to external NTP servers", - "destinationAddressPrefix": [ - {{#time.ntp.serverAddresses}} - "{{.}}", - {{/time.ntp.serverAddresses}} - ], - "destinationPortRange": "123", - "direction": "Outbound", - "priority": 3000, - "protocol": "UDP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.mirrorsExternal.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalPackageFetchingOutbound", - "access": "Allow", - "description": "Allow ports 443 (https) and 873 (unencrypted rsync) for fetching packages", - "destinationAddressPrefix": "Internet", - "destinationPortRange": ["443", "873"], - "direction": "Outbound", - "priority": 3200, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.mirrorsExternal.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalClamAVOutbound", - "access": "Allow", - "description": "Allow outbound connections to external ClamAV servers", - "destinationAddressPrefix": ["104.16.218.84", "104.16.219.84"], - "destinationPortRange": ["53", "443"], - "direction": "Outbound", - "priority": 3300, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.mirrorsExternal.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowAzureAutomationOutbound", - "access": "Allow", - "description": "Allow outbound connections to Azure automation servers", - "destinationAddressPrefix": "GuestAndHybridManagement", - "destinationPortRange": ["443"], - "direction": "Outbound", - "priority": 3800, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.mirrorsExternal.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherOutbound", - "access": "Deny", - "description": "Deny all other outbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Outbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - } -] diff --git a/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-repository-mirrors-external-tier3.json b/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-repository-mirrors-external-tier3.json deleted file mode 100644 index 7f066bb2df..0000000000 --- 
a/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-repository-mirrors-external-tier3.json +++ /dev/null @@ -1,138 +0,0 @@ -[ - { - "name": "DenyAdminVPNInbound", - "access": "Deny", - "description": "Deny connections from admin P2S VPN", - "destinationAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.mirrorsExternal.cidr}}", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 2000, - "protocol": "*", - "sourceAddressPrefix": "{{network.vpn.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherInbound", - "access": "Deny", - "description": "Deny all other inbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - }, - { - "name": "AllowMirrorSynchronisationOutbound", - "access": "Allow", - "description": "Allow rsync over ports 22 and 873.", - "destinationAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.mirrorsInternal.cidr}}", - "destinationPortRange": ["22", "873"], - "direction": "Outbound", - "priority": 300, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.mirrorsExternal.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowLinuxUpdatesOutbound", - "access": "Allow", - "description": "Allow outbound connections to Linux update servers", - "destinationAddressPrefix": "{{network.vnet.subnets.updateServers.cidr}}", - "destinationPortRange": ["8000"], - "direction": "Outbound", - "priority": 900, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.mirrorsExternal.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowDNSOutbound", - "access": "Allow", - "description": "Allow DNS requests to SHM", - "destinationAddressPrefix": "{{network.vnet.subnets.identity.cidr}}", - "destinationPortRange": "53", - "direction": "Outbound", - "priority": 1200, - "protocol": "*", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.mirrorsExternal.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowMonitoringToolsOutbound", - "access": "Allow", - "description": "Allow connections to local monitoring tools", - "destinationAddressPrefix": "{{network.vnet.subnets.monitoring.cidr}}", - "destinationPortRange": "443", - "direction": "Outbound", - "priority": 1500, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.mirrorsExternal.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalNTPOutbound", - "access": "Allow", - "description": "Allow outbound connections to external NTP servers", - "destinationAddressPrefix": [ - {{#time.ntp.serverAddresses}} - "{{.}}", - {{/time.ntp.serverAddresses}} - ], - "destinationPortRange": "123", - "direction": "Outbound", - "priority": 3000, - "protocol": "UDP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.mirrorsExternal.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalPackageFetchingOutbound", - "access": "Allow", - "description": "Allow ports 443 (https) and 873 (unencrypted rsync) for fetching packages", - "destinationAddressPrefix": "Internet", - "destinationPortRange": ["443", "873"], - "direction": "Outbound", - "priority": 3200, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.mirrorsExternal.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalClamAVOutbound", - "access": "Allow", - "description": "Allow outbound 
connections to external ClamAV servers", - "destinationAddressPrefix": ["104.16.218.84", "104.16.219.84"], - "destinationPortRange": ["53", "443"], - "direction": "Outbound", - "priority": 3300, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.mirrorsExternal.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowAzureAutomationOutbound", - "access": "Allow", - "description": "Allow outbound connections to Azure automation servers", - "destinationAddressPrefix": "GuestAndHybridManagement", - "destinationPortRange": ["443"], - "direction": "Outbound", - "priority": 3800, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.mirrorsExternal.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherOutbound", - "access": "Deny", - "description": "Deny all other outbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Outbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - } -] diff --git a/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-repository-mirrors-internal-tier2.json b/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-repository-mirrors-internal-tier2.json deleted file mode 100644 index 0755178b07..0000000000 --- a/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-repository-mirrors-internal-tier2.json +++ /dev/null @@ -1,138 +0,0 @@ -[ - { - "name": "AllowMirrorSynchronisationInbound", - "access": "Allow", - "description": "Allow rsync over ports 22 and 873.", - "destinationAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.mirrorsInternal.cidr}}", - "destinationPortRange": ["22", "873"], - "direction": "Inbound", - "priority": 300, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.mirrorsExternal.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowRepositoryAccessInbound", - "access": "Allow", - "description": "Allow package requests over ports 80 (http), 443 (pip) and 3128 (pip).", - "destinationAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.mirrorsInternal.cidr}}", - "destinationPortRange": ["80", "443", "3128"], - "direction": "Inbound", - "priority": 1400, - "protocol": "TCP", - "sourceAddressPrefix": "VirtualNetwork", - "sourcePortRange": "*" - }, - { - "name": "DenyAdminVPNInbound", - "access": "Deny", - "description": "Deny connections from admin P2S VPN", - "destinationAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.mirrorsInternal.cidr}}", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 2000, - "protocol": "*", - "sourceAddressPrefix": "{{network.vpn.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherInbound", - "access": "Deny", - "description": "Deny all other inbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - }, - { - "name": "AllowLinuxUpdatesOutbound", - "access": "Allow", - "description": "Allow outbound connections to Linux update servers", - "destinationAddressPrefix": "{{network.vnet.subnets.updateServers.cidr}}", - "destinationPortRange": ["8000"], - "direction": "Outbound", - "priority": 900, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.mirrorsInternal.cidr}}", - "sourcePortRange": "*" - }, - { - "name": 
"DenyDNSOutbound", - "access": "Deny", - "description": "Deny DNS requests to SHM", - "destinationAddressPrefix": "{{network.vnet.subnets.identity.cidr}}", - "destinationPortRange": "53", - "direction": "Outbound", - "priority": 1200, - "protocol": "*", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.mirrorsInternal.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowMonitoringToolsOutbound", - "access": "Allow", - "description": "Allow connections to local monitoring tools", - "destinationAddressPrefix": "{{network.vnet.subnets.monitoring.cidr}}", - "destinationPortRange": "443", - "direction": "Outbound", - "priority": 1500, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.mirrorsInternal.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalNTPOutbound", - "access": "Allow", - "description": "Allow outbound connections to external NTP servers", - "destinationAddressPrefix": [ - {{#time.ntp.serverAddresses}} - "{{.}}", - {{/time.ntp.serverAddresses}} - ], - "destinationPortRange": "123", - "direction": "Outbound", - "priority": 3000, - "protocol": "UDP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.mirrorsInternal.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalClamAVOutbound", - "access": "Allow", - "description": "Allow outbound connections to external ClamAV servers", - "destinationAddressPrefix": ["104.16.218.84", "104.16.219.84"], - "destinationPortRange": ["53", "443"], - "direction": "Outbound", - "priority": 3300, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.mirrorsInternal.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowAzureAutomationOutbound", - "access": "Allow", - "description": "Allow outbound connections to Azure automation servers", - "destinationAddressPrefix": "GuestAndHybridManagement", - "destinationPortRange": ["443"], - "direction": "Outbound", - "priority": 3800, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.mirrorsInternal.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherOutbound", - "access": "Deny", - "description": "Deny all other outbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Outbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - } -] diff --git a/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-repository-mirrors-internal-tier3.json b/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-repository-mirrors-internal-tier3.json deleted file mode 100644 index f1009f1997..0000000000 --- a/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-repository-mirrors-internal-tier3.json +++ /dev/null @@ -1,138 +0,0 @@ -[ - { - "name": "AllowMirrorSynchronisationInbound", - "access": "Allow", - "description": "Allow rsync over ports 22 and 873.", - "destinationAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.mirrorsInternal.cidr}}", - "destinationPortRange": ["22", "873"], - "direction": "Inbound", - "priority": 300, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.mirrorsExternal.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowRepositoryAccessInbound", - "access": "Allow", - "description": "Allow package requests over ports 80 (http), 443 (pip) and 3128 (pip).", - "destinationAddressPrefix": 
"{{network.vnetRepositoriesTier3.subnets.mirrorsInternal.cidr}}", - "destinationPortRange": ["80", "443", "3128"], - "direction": "Inbound", - "priority": 1400, - "protocol": "TCP", - "sourceAddressPrefix": "VirtualNetwork", - "sourcePortRange": "*" - }, - { - "name": "DenyAdminVPNInbound", - "access": "Deny", - "description": "Deny connections from admin P2S VPN", - "destinationAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.mirrorsInternal.cidr}}", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 2000, - "protocol": "*", - "sourceAddressPrefix": "{{network.vpn.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherInbound", - "access": "Deny", - "description": "Deny all other inbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - }, - { - "name": "AllowLinuxUpdatesOutbound", - "access": "Allow", - "description": "Allow outbound connections to Linux update servers", - "destinationAddressPrefix": "{{network.vnet.subnets.updateServers.cidr}}", - "destinationPortRange": ["8000"], - "direction": "Outbound", - "priority": 900, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.mirrorsInternal.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "DenyDNSOutbound", - "access": "Deny", - "description": "Deny DNS requests to SHM", - "destinationAddressPrefix": "{{network.vnet.subnets.identity.cidr}}", - "destinationPortRange": "53", - "direction": "Outbound", - "priority": 1200, - "protocol": "*", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.mirrorsInternal.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowMonitoringToolsOutbound", - "access": "Allow", - "description": "Allow connections to local monitoring tools", - "destinationAddressPrefix": "{{network.vnet.subnets.monitoring.cidr}}", - "destinationPortRange": "443", - "direction": "Outbound", - "priority": 1500, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.mirrorsInternal.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalNTPOutbound", - "access": "Allow", - "description": "Allow outbound connections to external NTP servers", - "destinationAddressPrefix": [ - {{#time.ntp.serverAddresses}} - "{{.}}", - {{/time.ntp.serverAddresses}} - ], - "destinationPortRange": "123", - "direction": "Outbound", - "priority": 3000, - "protocol": "UDP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.mirrorsInternal.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalClamAVOutbound", - "access": "Allow", - "description": "Allow outbound connections to external ClamAV servers", - "destinationAddressPrefix": ["104.16.218.84", "104.16.219.84"], - "destinationPortRange": ["53", "443"], - "direction": "Outbound", - "priority": 3300, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.mirrorsInternal.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowAzureAutomationOutbound", - "access": "Allow", - "description": "Allow outbound connections to Azure automation servers", - "destinationAddressPrefix": "GuestAndHybridManagement", - "destinationPortRange": ["443"], - "direction": "Outbound", - "priority": 3800, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.mirrorsInternal.cidr}}", - "sourcePortRange": "*" - }, - { - "name": 
"DenyAllOtherOutbound", - "access": "Deny", - "description": "Deny all other outbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Outbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - } -] diff --git a/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-repository-proxies-tier2.json b/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-repository-proxies-tier2.json deleted file mode 100644 index 9d9fb4d810..0000000000 --- a/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-repository-proxies-tier2.json +++ /dev/null @@ -1,138 +0,0 @@ -[ - { - "name": "AllowRepositoryAccessInbound", - "access": "Allow", - "description": "Allow package requests over http", - "destinationAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.proxies.cidr}}", - "destinationPortRange": "80", - "direction": "Inbound", - "priority": 1400, - "protocol": "*", - "sourceAddressPrefix": "VirtualNetwork", - "sourcePortRange": "*" - }, - { - "name": "DenyAdminVPNInbound", - "access": "Deny", - "description": "Deny connections from admin P2S VPN", - "destinationAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.proxies.cidr}}", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 2000, - "protocol": "*", - "sourceAddressPrefix": "{{network.vpn.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherInbound", - "access": "Deny", - "description": "Deny all other inbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - }, - { - "name": "AllowLinuxUpdatesOutbound", - "access": "Allow", - "description": "Allow outbound connections to Linux update servers", - "destinationAddressPrefix": "{{network.vnet.subnets.updateServers.cidr}}", - "destinationPortRange": ["8000"], - "direction": "Outbound", - "priority": 900, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.proxies.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowDNSOutbound", - "access": "Allow", - "description": "Allow DNS requests to SHM", - "destinationAddressPrefix": "{{network.vnet.subnets.identity.cidr}}", - "destinationPortRange": "53", - "direction": "Outbound", - "priority": 1200, - "protocol": "*", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.proxies.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowMonitoringToolsOutbound", - "access": "Allow", - "description": "Allow connections to local monitoring tools", - "destinationAddressPrefix": "{{network.vnet.subnets.monitoring.cidr}}", - "destinationPortRange": "443", - "direction": "Outbound", - "priority": 1500, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.proxies.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalNTPOutbound", - "access": "Allow", - "description": "Allow outbound connections to external NTP servers", - "destinationAddressPrefix": [ - {{#time.ntp.serverAddresses}} - "{{.}}", - {{/time.ntp.serverAddresses}} - ], - "destinationPortRange": "123", - "direction": "Outbound", - "priority": 3000, - "protocol": "UDP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.proxies.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalPackageFetchingOutbound", - "access": "Allow", - "description": "Allow 
ports 80 (http) and 443 (https) for fetching packages", - "destinationAddressPrefix": "Internet", - "destinationPortRange": ["80", "443"], - "direction": "Outbound", - "priority": 3200, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.proxies.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalClamAVOutbound", - "access": "Allow", - "description": "Allow outbound connections to external ClamAV servers", - "destinationAddressPrefix": ["104.16.218.84", "104.16.219.84"], - "destinationPortRange": ["53", "443"], - "direction": "Outbound", - "priority": 3300, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.proxies.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowAzureAutomationOutbound", - "access": "Allow", - "description": "Allow outbound connections to Azure automation servers", - "destinationAddressPrefix": "GuestAndHybridManagement", - "destinationPortRange": ["443"], - "direction": "Outbound", - "priority": 3800, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier2.subnets.proxies.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherOutbound", - "access": "Deny", - "description": "Deny all other outbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Outbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - } -] diff --git a/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-repository-proxies-tier3.json b/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-repository-proxies-tier3.json deleted file mode 100644 index d4c9558fa8..0000000000 --- a/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-repository-proxies-tier3.json +++ /dev/null @@ -1,138 +0,0 @@ -[ - { - "name": "AllowRepositoryAccessInbound", - "access": "Allow", - "description": "Allow package requests over http", - "destinationAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.proxies.cidr}}", - "destinationPortRange": "80", - "direction": "Inbound", - "priority": 1400, - "protocol": "*", - "sourceAddressPrefix": "VirtualNetwork", - "sourcePortRange": "*" - }, - { - "name": "DenyAdminVPNInbound", - "access": "Deny", - "description": "Deny connections from admin P2S VPN", - "destinationAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.proxies.cidr}}", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 2000, - "protocol": "*", - "sourceAddressPrefix": "{{network.vpn.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherInbound", - "access": "Deny", - "description": "Deny all other inbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - }, - { - "name": "AllowLinuxUpdatesOutbound", - "access": "Allow", - "description": "Allow outbound connections to Linux update servers", - "destinationAddressPrefix": "{{network.vnet.subnets.updateServers.cidr}}", - "destinationPortRange": ["8000"], - "direction": "Outbound", - "priority": 900, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.proxies.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowDNSOutbound", - "access": "Allow", - "description": "Allow DNS requests to SHM", - "destinationAddressPrefix": "{{network.vnet.subnets.identity.cidr}}", - 
"destinationPortRange": "53", - "direction": "Outbound", - "priority": 1200, - "protocol": "*", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.proxies.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowMonitoringToolsOutbound", - "access": "Allow", - "description": "Allow connections to local monitoring tools", - "destinationAddressPrefix": "{{network.vnet.subnets.monitoring.cidr}}", - "destinationPortRange": "443", - "direction": "Outbound", - "priority": 1500, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.proxies.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalNTPOutbound", - "access": "Allow", - "description": "Allow outbound connections to external NTP servers", - "destinationAddressPrefix": [ - {{#time.ntp.serverAddresses}} - "{{.}}", - {{/time.ntp.serverAddresses}} - ], - "destinationPortRange": "123", - "direction": "Outbound", - "priority": 3000, - "protocol": "UDP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.proxies.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalPackageFetchingOutbound", - "access": "Allow", - "description": "Allow ports 80 (http) and 443 (https) for fetching packages", - "destinationAddressPrefix": "Internet", - "destinationPortRange": ["80", "443"], - "direction": "Outbound", - "priority": 3200, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.proxies.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalClamAVOutbound", - "access": "Allow", - "description": "Allow outbound connections to external ClamAV servers", - "destinationAddressPrefix": ["104.16.218.84", "104.16.219.84"], - "destinationPortRange": ["53", "443"], - "direction": "Outbound", - "priority": 3300, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.proxies.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowAzureAutomationOutbound", - "access": "Allow", - "description": "Allow outbound connections to Azure automation servers", - "destinationAddressPrefix": "GuestAndHybridManagement", - "destinationPortRange": ["443"], - "direction": "Outbound", - "priority": 3800, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnetRepositoriesTier3.subnets.proxies.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherOutbound", - "access": "Deny", - "description": "Deny all other outbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Outbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - } -] diff --git a/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-update-servers.json b/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-update-servers.json deleted file mode 100644 index bcc3bbfa22..0000000000 --- a/deployment/safe_haven_management_environment/network_rules/shm-nsg-rules-update-servers.json +++ /dev/null @@ -1,118 +0,0 @@ -[ - { - "name": "AllowVirtualNetworkInbound", - "access": "Allow", - "description": "Allow inbound connections from the local virtual network", - "destinationAddressPrefix": "{{network.vnet.subnets.updateServers.cidr}}", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 1500, - "protocol": "*", - "sourceAddressPrefix": "VirtualNetwork", - "sourcePortRange": "*" - }, - { - "name": "DenyAdminVPNInbound", - "access": "Deny", - "description": "Deny connections from admin P2S VPN", - 
"destinationAddressPrefix": "{{network.vnet.subnets.updateServers.cidr}}", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 2000, - "protocol": "*", - "sourceAddressPrefix": "{{network.vpn.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherInbound", - "access": "Deny", - "description": "Deny all other inbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - }, - { - "name": "AllowMonitoringToolsOutbound", - "access": "Allow", - "description": "Allow connections to local monitoring tools", - "destinationAddressPrefix": "{{network.vnet.subnets.monitoring.cidr}}", - "destinationPortRange": "443", - "direction": "Outbound", - "priority": 1500, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnet.subnets.updateServers.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalNTPOutbound", - "access": "Allow", - "description": "Allow outbound connections to external NTP servers", - "destinationAddressPrefix": [ - {{#time.ntp.serverAddresses}} - "{{.}}", - {{/time.ntp.serverAddresses}} - ], - "destinationPortRange": "123", - "direction": "Outbound", - "priority": 3000, - "protocol": "UDP", - "sourceAddressPrefix": "{{network.vnet.subnets.updateServers.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalClamAVOutbound", - "access": "Allow", - "description": "Allow outbound connections to external ClamAV servers", - "destinationAddressPrefix": ["104.16.218.84", "104.16.219.84"], - "destinationPortRange": ["53", "443"], - "direction": "Outbound", - "priority": 3300, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnet.subnets.updateServers.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowLinuxUpdatesOutbound", - "access": "Allow", - "description": "Allow outbound connections to Linux update servers", - "destinationAddressPrefix": [ - {{#monitoring.updateServers.externalIpAddresses.linux}} - "{{.}}", - {{/monitoring.updateServers.externalIpAddresses.linux}} - ], - "destinationPortRange": ["80", "443"], - "direction": "Outbound", - "priority": 3600, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnet.subnets.updateServers.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowAzureAutomationOutbound", - "access": "Allow", - "description": "Allow outbound connections to Azure automation servers", - "destinationAddressPrefix": "GuestAndHybridManagement", - "destinationPortRange": ["443"], - "direction": "Outbound", - "priority": 3800, - "protocol": "TCP", - "sourceAddressPrefix": "{{network.vnet.subnets.updateServers.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherOutbound", - "access": "Deny", - "description": "Deny all other outbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Outbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - } -] diff --git a/deployment/safe_haven_management_environment/setup/.gitignore b/deployment/safe_haven_management_environment/setup/.gitignore deleted file mode 100644 index b2290143a4..0000000000 --- a/deployment/safe_haven_management_environment/setup/.gitignore +++ /dev/null @@ -1 +0,0 @@ -certs diff --git a/deployment/safe_haven_management_environment/setup/Copy_SHM_Users.ps1 b/deployment/safe_haven_management_environment/setup/Copy_SHM_Users.ps1 deleted file mode 100644 index 8bc5ee3565..0000000000 --- 
a/deployment/safe_haven_management_environment/setup/Copy_SHM_Users.ps1 +++ /dev/null @@ -1,75 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID for the old SHM (e.g. 'project')")] - [string]$oldShmId, - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID for the new SHM (e.g. 'project')")] - [string]$newShmId -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureCompute -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$oldConfig = Get-ShmConfig -shmId $oldShmId -$newConfig = Get-ShmConfig -shmId $newShmId -$originalContext = Get-AzContext - - -# Extract list of users -# --------------------- -$null = Set-AzContext -SubscriptionId $oldConfig.subscriptionName -ErrorAction Stop -Add-LogMessage -Level Info "Exporting user list for $($oldConfig.shm.id) from $($oldConfig.dc.vmName)..." -# Run remote script -$script = @" -`$userOuPath = (Get-ADObject -Filter * | Where-Object { `$_.Name -eq "Safe Haven Research Users" }).DistinguishedName -`$users = Get-ADUser -Filter * -SearchBase "`$userOuPath" -Properties * -foreach (`$user in `$users) { - `$groupName = (`$user | Select-Object -ExpandProperty MemberOf | ForEach-Object { ((`$_ -Split ",")[0] -Split "=")[1] }) -join "|" - `$user | Add-Member -NotePropertyName GroupName -NotePropertyValue `$groupName -Force -} -`$users | Select-Object SamAccountName,GivenName,Surname,Mobile,GroupName | ` - ConvertTo-Csv | Where-Object { `$_ -notmatch '^#' } | ` - ForEach-Object { `$_.replace('"','') } -"@ -$result = Invoke-RemoteScript -Shell "PowerShell" -Script $script -VMName $oldConfig.dc.vmName -ResourceGroupName $oldConfig.dc.rg -$null = Set-AzContext -Context $originalContext -ErrorAction Stop - - -# Construct list of groups -# ------------------------ -Add-LogMessage -Level Info "Constructing list of user groups from $($oldConfig.shm.id)..." -$users = $result.Value[0].Message | ConvertFrom-Csv -$securityGroups = @() -foreach ($user in $users) { - $securityGroups += @($user.GroupName.Split("|")) -} -$securityGroups = $securityGroups | Sort-Object | Get-Unique - - -# Create security groups on new SHM -# --------------------------------- -$null = Set-AzContext -SubscriptionId $newConfig.subscriptionName -ErrorAction Stop -Write-Output "Creating security groups and user list for $($newConfig.shm.id) on $($newConfig.dc.vmName)..." -$script = @" -foreach (`$groupName in @('$($securityGroups -join "','")')) { - `$groupExists = `$(Get-ADGroup -Filter "Name -eq '`$groupName'").Name - if (`$groupExists) { - Write-Output " [o] Security group '`$groupName' already exists" - } else { - New-ADGroup -Name "`$groupName" -GroupScope Global -Description "`$groupName" -GroupCategory Security -Path "OU=$($newConfig.domain.ous.securityGroups.name),$($newConfig.domain.dn)" - if (`$?) { - Write-Output " [o] Security group '`$groupName' created successfully" - } else { - Write-Output " [x] Security group '`$groupName' creation failed!" 
- } - } -} -`$userFilePath = "$($newConfig.dc.installationDirectory)\$(Get-Date -UFormat %Y%m%d)_imported_user_details.csv" -"$($($users | ConvertTo-Csv | ForEach-Object { $_.replace('"', '') }) -join ';')" -split ';' | Out-File `$userFilePath -$($newConfig.dc.installationDirectory)\CreateUsers.ps1 -userFilePath `$userFilePath 2> Out-Null -"@ -$null = Invoke-RemoteScript -Shell "PowerShell" -Script $script -VMName $newConfig.dc.vmName -ResourceGroupName $newConfig.dc.rg -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/safe_haven_management_environment/setup/Deploy_SHM.ps1 b/deployment/safe_haven_management_environment/setup/Deploy_SHM.ps1 deleted file mode 100755 index 1a4216ccfd..0000000000 --- a/deployment/safe_haven_management_environment/setup/Deploy_SHM.ps1 +++ /dev/null @@ -1,105 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project'. Maximum of seven alphanumeric characters)")] - [string]$shmId, - [Parameter(Mandatory = $false, HelpMessage = "Use device authentication for connecting to Azure and Microsoft Graph")] - [switch]$UseDeviceAuthentication -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Microsoft.Graph.Authentication -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop - - -# Connect to Azure -# ---------------- -if (Get-AzContext) { Disconnect-AzAccount | Out-Null } # force a refresh of the Azure token before starting -Add-LogMessage -Level Info "Attempting to authenticate with Azure. Please sign in with an account with admin rights over the subscriptions you plan to use." -if ($UseDeviceAuthentication) { - Connect-AzAccount -UseDeviceAuthentication -ErrorAction Stop | Out-Null -} else { - Connect-AzAccount -ErrorAction Stop | Out-Null -} -if (Get-AzContext) { - Add-LogMessage -Level Success "Authenticated with Azure as $((Get-AzContext).Account.Id)" -} else { - Add-LogMessage -Level Fatal "Failed to authenticate with Azure" -} - - -# Connect to Microsoft Graph -# -------------------------- -if (Get-MgContext) { Disconnect-MgGraph | Out-Null } # force a refresh of the Microsoft Graph token before starting -Add-LogMessage -Level Info "Attempting to authenticate with Microsoft Graph. Please sign in with an account with admin rights over the Azure Active Directory you plan to use." 
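As a hedged aside (not part of the original scripts), once Connect-MgGraph succeeds the granted delegated permissions can be compared against the set these setup scripts request; Get-MgContext is the stock Microsoft.Graph.Authentication cmdlet, and the scope list below simply mirrors the one these deployment scripts pass to Connect-MgGraph.
# Sketch only: confirm the connected Graph context holds the scopes the deployment needs
$requiredScopes = @("User.ReadWrite.All", "UserAuthenticationMethod.ReadWrite.All", "Directory.AccessAsUser.All", "RoleManagement.ReadWrite.Directory")
$grantedScopes = (Get-MgContext).Scopes
$missingScopes = $requiredScopes | Where-Object { $_ -notin $grantedScopes }
if ($missingScopes) { Write-Warning "Missing Graph scopes: $($missingScopes -join ', ')" }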
-if ($UseDeviceAuthentication) { - Connect-MgGraph -TenantId $config.azureAdTenantId -Scopes "User.ReadWrite.All", "UserAuthenticationMethod.ReadWrite.All", "Directory.AccessAsUser.All", "RoleManagement.ReadWrite.Directory" -ErrorAction Stop -ContextScope Process -UseDeviceAuthentication -} else { - Connect-MgGraph -TenantId $config.azureAdTenantId -Scopes "User.ReadWrite.All", "UserAuthenticationMethod.ReadWrite.All", "Directory.AccessAsUser.All", "RoleManagement.ReadWrite.Directory" -ErrorAction Stop -ContextScope Process | Out-Null -} -if (Get-MgContext) { - Add-LogMessage -Level Success "Authenticated with Microsoft Graph as $((Get-MgContext).Account)" -} else { - Add-LogMessage -Level Fatal "Failed to authenticate with Microsoft Graph" -} - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-ShmConfig -shmId $shmId -$originalContext = Get-AzContext -$null = Set-AzContext -Subscription $config.dns.subscriptionName -ErrorAction Stop - - -# Check Powershell requirements -# ----------------------------- -Invoke-Command -ScriptBlock { & "$(Join-Path $PSScriptRoot '..' '..' 'CheckRequirements.ps1')" } - - -# Deploy the SHM KeyVault and register emergency user with AAD -# ------------------------------------------------------------ -Invoke-Command -ScriptBlock { & "$(Join-Path $PSScriptRoot 'Setup_SHM_Key_Vault_And_Emergency_Admin.ps1')" -shmId $shmId } - - -# Setup SHM networking and VPN -# ---------------------------- -Invoke-Command -ScriptBlock { & "$(Join-Path $PSScriptRoot 'Setup_SHM_Networking.ps1')" -shmId $shmId } - - -# Setup SHM monitoring -# -------------------- -Invoke-Command -ScriptBlock { & "$(Join-Path $PSScriptRoot 'Setup_SHM_Monitoring.ps1')" -shmId $shmId } - - -# Setup SHM firewall and routing -# ------------------------------ -Invoke-Command -ScriptBlock { & "$(Join-Path $PSScriptRoot 'Setup_SHM_Firewall.ps1')" -shmId $shmId } - - -# Setup SHM storage accounts -# -------------------------- -Invoke-Command -ScriptBlock { & "$(Join-Path $PSScriptRoot 'Setup_SHM_Storage_Accounts.ps1')" -shmId $shmId } - - -# Setup SHM domain controllers -# ---------------------------- -Invoke-Command -ScriptBlock { & "$(Join-Path $PSScriptRoot 'Setup_SHM_DC.ps1')" -shmId $shmId } - - -# Setup SHM update servers -# ------------------------ -Invoke-Command -ScriptBlock { & "$(Join-Path $PSScriptRoot 'Setup_SHM_Update_Servers.ps1')" -shmId $shmId } - - -# Setup SHM package repositories -# ------------------------------ -Invoke-Command -ScriptBlock { & "$(Join-Path $PSScriptRoot 'Setup_SHM_Package_Repositories.ps1')" -shmId $shmId } - - -# Register all SHM VMs with LogAnalytics -# -------------------------------------- -Invoke-Command -ScriptBlock { & "$(Join-Path $PSScriptRoot 'Setup_SHM_Register_VMs.ps1')" -shmId $shmId } - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/safe_haven_management_environment/setup/Setup_SHM_AAD_Domain.ps1 b/deployment/safe_haven_management_environment/setup/Setup_SHM_AAD_Domain.ps1 deleted file mode 100644 index 5dd5cc735c..0000000000 --- a/deployment/safe_haven_management_environment/setup/Setup_SHM_AAD_Domain.ps1 +++ /dev/null @@ -1,115 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 
'project')")] - [string]$shmId -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.Dns -ErrorAction Stop -Import-Module Microsoft.Graph.Authentication -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureDns -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-ShmConfig -shmId $shmId -$originalContext = Get-AzContext -$null = Set-AzContext -Subscription $config.dns.subscriptionName -ErrorAction Stop - - -# Connect to Microsoft Graph -# -------------------------- -if (-not (Get-MgContext)) { - Add-LogMessage -Level Info "Attempting to authenticate with Microsoft Graph. Please sign in with an account with admin rights over the Azure Active Directory you plan to use." - Connect-MgGraph -TenantId $config.azureAdTenantId -Scopes "User.ReadWrite.All", "UserAuthenticationMethod.ReadWrite.All", "Directory.AccessAsUser.All", "RoleManagement.ReadWrite.Directory" -ErrorAction Stop -ContextScope Process - if (Get-MgContext) { - Add-LogMessage -Level Success "Authenticated with Microsoft Graph" - } else { - Add-LogMessage -Level Fatal "Failed to authenticate with Microsoft Graph" - } -} - - -# Ensure that the SHM domain is registered with the Azure AD -# ---------------------------------------------------------- -Add-LogMessage -Level Info "Ensuring that '$($config.domain.fqdn)' is registered with AAD..." -$aadDomain = Get-MgDomain | Where-Object { $_.Id -eq $config.domain.fqdn } -if ($aadDomain) { - Add-LogMessage -Level InfoSuccess "'$($config.domain.fqdn)' already present as custom domain on SHM AAD." -} else { - $aadDomain = New-MgDomain -Id $config.domain.fqdn - Add-LogMessage -Level Success "'$($config.domain.fqdn)' added as custom domain on SHM AAD." -} - - -# Verify the SHM domain record for the Azure AD -# --------------------------------------------- -Add-LogMessage -Level Info "Ensuring that '$($config.domain.fqdn)' is verified with AAD..." -if ($aadDomain.IsVerified) { - Add-LogMessage -Level InfoSuccess "'$($config.domain.fqdn)' already verified on SHM AAD." -} else { - # Fetch TXT version of AAD domain verification record set - $validationRecord = Get-MgDomainVerificationDnsRecord -DomainId $config.domain.fqdn | Where-Object { $_.RecordType -eq "Txt" } - # Make a DNS TXT Record object containing the validation code - $validationCode = New-AzDnsRecordConfig -Value $validationRecord.AdditionalProperties.text - - # Check if this validation record already exists for the domain - $recordSet = Get-AzDnsRecordSet -RecordType TXT -Name "@" -ZoneName $config.domain.fqdn -ResourceGroupName $config.dns.rg -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($notExists) { - # If no TXT record set exists at all, create a new TXT record set with the domain validation code - $null = Deploy-DnsRecord -DnsRecords $validationCode -RecordName "@" -RecordType "TXT" -ResourceGroupName $config.dns.rg -Subscription $config.dns.subscriptionName -TtlSeconds 30 -ZoneName $config.domain.fqdn - Add-LogMessage -Level Success "Verification TXT record added to '$($config.domain.fqdn)' DNS zone." 
- } else { - # Check if the verification TXT record already exists in domain DNS zone - $existingRecord = $recordSet.Records | Where-Object { $_.Value -eq $validationCode } - if ($existingRecord) { - Add-LogMessage -Level InfoSuccess "Verification TXT record already exists in '$($config.domain.fqdn)' DNS zone." - } else { - # Add the verification TXT record if it did not already exist - $null = Add-AzDnsRecordConfig -RecordSet $recordSet -Value $validationCode - $null = Set-AzDnsRecordSet -RecordSet $recordSet - Add-LogMessage -Level Success "Verification TXT record added to '$($config.domain.fqdn)' DNS zone." - } - } - # Verify domain on AAD - $maxTries = 10 - $retryDelaySeconds = 60 - - for ($tries = 1; $tries -le $maxTries; $tries++) { - Confirm-MgDomain -DomainId $config.domain.fqdn | Out-Null - Add-LogMessage -Level Info "Checking domain verification status on SHM AAD (attempt $tries of $maxTries)..." - $aadDomain = Get-MgDomain -DomainId $config.domain.fqdn - if ($aadDomain.IsVerified) { - Add-LogMessage -Level Success "Domain '$($config.domain.fqdn)' is verified on SHM AAD." - break - } elseif ($tries -eq $maxTries) { - Add-LogMessage -Level Fatal "Failed to verify domain '$($config.domain.fqdn)' after $tries attempts. Please try again later." - } else { - Add-LogMessage -Level Warning "Verification check failed. Retrying in $retryDelaySeconds seconds..." - Start-Sleep -Seconds $retryDelaySeconds - Confirm-MgDomain -DomainId $config.domain.fqdn | Out-Null - } - } -} - - -# Make domain primary on SHM AAD -# ------------------------------ -Add-LogMessage -Level Info "Ensuring that '$($config.domain.fqdn)' is primary domain on SHM AAD." -if ($aadDomain.IsDefault) { - Add-LogMessage -Level InfoSuccess "'$($config.domain.fqdn)' is already primary domain on SHM AAD." -} else { - $null = Update-MgDomain -DomainId $config.domain.fqdn -IsDefault - $aadDomain = Get-MgDomain -DomainId $config.domain.fqdn - if ($aadDomain.IsDefault) { - Add-LogMessage -Level Success "Set '$($config.domain.fqdn)' as primary domain on SHM AAD." - } else { - Add-LogMessage -Level Fatal "Unable to set '$($config.domain.fqdn)' as primary domain on SHM AAD!" - } -} - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/safe_haven_management_environment/setup/Setup_SHM_DC.ps1 b/deployment/safe_haven_management_environment/setup/Setup_SHM_DC.ps1 deleted file mode 100644 index 5365653c21..0000000000 --- a/deployment/safe_haven_management_environment/setup/Setup_SHM_DC.ps1 +++ /dev/null @@ -1,203 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 
'project')")] - [string]$shmId -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.Compute -ErrorAction Stop -Import-Module Az.Network -ErrorAction Stop -Import-Module Az.Storage -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureCompute -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureKeyVault -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureResources -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureStorage -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Cryptography -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/DataStructures -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/RemoteCommands -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Templates -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-ShmConfig -shmId $shmId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.subscriptionName -ErrorAction Stop -$storageAccount = Get-AzStorageAccount -ResourceGroupName $config.storage.artifacts.rg -Name $config.storage.artifacts.accountName - -# Upload DSC scripts -# ------------------ -Add-LogMessage -Level Info "[ ] Uploading desired state configuration (DSC) files to storage account '$($storageAccount.StorageAccountName)'..." -$dscPath = Join-Path $PSScriptRoot ".." "desired_state_configuration" -$success = $true -$null = Publish-AzVMDscConfiguration -ConfigurationPath (Join-Path $dscPath "DC1DesiredState.ps1") ` - -ContainerName $config.storage.artifacts.containers.shmDesiredState ` - -Force ` - -ResourceGroupName $config.storage.artifacts.rg ` - -SkipDependencyDetection ` - -StorageAccountName $config.storage.artifacts.accountName -$success = $success -and $? -$null = Publish-AzVMDscConfiguration -ConfigurationPath (Join-Path $dscPath "DC2DesiredState.ps1") ` - -ContainerName $config.storage.artifacts.containers.shmDesiredState ` - -Force ` - -ResourceGroupName $config.storage.artifacts.rg ` - -SkipDependencyDetection ` - -StorageAccountName $config.storage.artifacts.accountName -$success = $success -and $? -if ($success) { - Add-LogMessage -Level Success "Uploaded desired state configuration (DSC) files" -} else { - Add-LogMessage -Level Fatal "Failed to upload desired state configuration (DSC) files!" -} - - -# Upload artifacts for configuring the DC -# --------------------------------------- -Add-LogMessage -Level Info "[ ] Uploading domain controller (DC) configuration files to storage account '$($storageAccount.StorageAccountName)'..." 
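The artifact uploads below follow the standard Az.Storage blob-upload pattern; as a minimal sketch, assuming an existing storage account and container whose names here are placeholders rather than values from the deleted configuration:
# Sketch only: upload a local file as a blob using a storage account context
Import-Module Az.Storage
$exampleContext = (Get-AzStorageAccount -ResourceGroupName "RG_EXAMPLE" -Name "examplestorageacct").Context
Set-AzStorageBlobContent -File "./Disconnect_AD.ps1" -Container "example-artifacts" -Blob "Disconnect_AD.ps1" -Context $exampleContext -Force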
-$success = $true -foreach ($filePath in $(Get-ChildItem -File (Join-Path $dscPath "dc1Artifacts"))) { - if ($($filePath | Split-Path -Leaf) -eq "Disconnect_AD.mustache.ps1") { - # Expand the AD disconnection template before uploading - $adScriptLocalFilePath = (New-TemporaryFile).FullName - Expand-MustacheTemplate -Template $(Get-Content $filePath -Raw) -Parameters $config | Out-File $adScriptLocalFilePath - $null = Set-AzStorageBlobContent -Container $config.storage.artifacts.containers.shmArtifactsDC -Context $storageAccount.Context -Blob "Disconnect_AD.ps1" -File $adScriptLocalFilePath -Force - $null = Remove-Item $adScriptLocalFilePath - } else { - $null = Set-AzStorageBlobContent -Container $config.storage.artifacts.containers.shmArtifactsDC -Context $storageAccount.Context -File $filePath -Force - } - $success = $success -and $? -} -if ($success) { - Add-LogMessage -Level Success "Uploaded domain controller (DC) configuration files" -} else { - Add-LogMessage -Level Fatal "Failed to upload domain controller (DC) configuration files!" -} - - -# Upload Windows package installers -# --------------------------------- -Add-LogMessage -Level Info "[ ] Uploading Windows package installers to storage account '$($storageAccount.StorageAccountName)'..." -try { - # AzureADConnect - $null = Set-AzureStorageBlobFromUri -FileUri "https://download.microsoft.com/download/B/0/0/B00291D0-5A83-4DE7-86F5-980BC00DE05A/AzureADConnect.msi" -StorageContainer $config.storage.artifacts.containers.shmArtifactsDC -StorageContext $storageAccount.Context - Add-LogMessage -Level Success "Uploaded Windows package installers" -} catch { - Add-LogMessage -Level Fatal "Failed to upload Windows package installers!" -Exception $_.Exception -} - - -# Create SHM DC resource group if it does not exist -# ------------------------------------------------- -$null = Deploy-ResourceGroup -Name $config.dc.rg -Location $config.location - - -# Retrieve usernames/passwords from the Key Vault -# ----------------------------------------------- -Add-LogMessage -Level Info "Creating/retrieving secrets from Key Vault '$($config.keyVault.name)'..." -$domainAdminUsername = Resolve-KeyVaultSecret -VaultName $config.keyVault.name -SecretName $config.keyVault.secretNames.domainAdminUsername -DefaultValue "domain$($config.id)admin".ToLower() -AsPlaintext -$domainAdminPassword = Resolve-KeyVaultSecret -VaultName $config.keyVault.name -SecretName $config.keyVault.secretNames.domainAdminPassword -DefaultLength 20 -AsPlaintext -$safemodeAdminPassword = Resolve-KeyVaultSecret -VaultName $config.keyVault.name -SecretName $config.dc.safemodePasswordSecretName -DefaultLength 20 -AsPlaintext - - -# Deploy SHM DC from template -# --------------------------- -Add-LogMessage -Level Info "Deploying domain controllers from template..." 
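Deploy-ArmTemplate is a helper from this repository's common modules; assuming it wraps the stock Az.Resources deployment cmdlet, the underlying call for the parameter hashtable built below would look roughly like this (the resource group and parameter values are illustrative only):
# Sketch only: deploy an ARM template with a parameter hashtable
Import-Module Az.Resources
$exampleParams = @{ administratorUsername = "exampleadmin"; vmSize = "Standard_DS2_v2" }
New-AzResourceGroupDeployment -ResourceGroupName "RG_EXAMPLE" -TemplateFile "./shm-dc-template.json" -TemplateParameterObject $exampleParams -Verbose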
-$params = @{ - administratorPassword = (ConvertTo-SecureString $domainAdminPassword -AsPlainText -Force) - administratorUsername = $domainAdminUsername - bootDiagnosticsAccountName = $config.storage.bootdiagnostics.accountName - dc1HostName = $config.dc.hostname - dc1IpAddress = $config.dc.ip - dc1VmName = $config.dc.vmName - dc2HostName = $config.dcb.hostname - dc2IpAddress = $config.dcb.ip - dc2VmName = $config.dcb.vmName - externalDnsResolverIpAddress = $config.dc.external_dns_resolver - shmId = $config.id - virtualNetworkName = $config.network.vnet.name - virtualNetworkResourceGroupName = $config.network.vnet.rg - virtualNetworkSubnetName = $config.network.vnet.subnets.identity.name - vmOsDiskSizeGb = [int]$config.dc.disks.os.sizeGb - vmOsDiskType = $config.dc.disks.os.type - vmSize = $config.dc.vmSize -} -Deploy-ArmTemplate -TemplatePath (Join-Path $PSScriptRoot ".." "arm_templates" "shm-dc-template.json") -TemplateParameters $params -ResourceGroupName $config.dc.rg - - -# Apply SHM DC desired state -# -------------------------- -$domainAdminCredentials = (New-Object System.Management.Automation.PSCredential ($domainAdminUsername, $(ConvertTo-SecureString $domainAdminPassword -AsPlainText -Force))) -$safeModeCredentials = (New-Object System.Management.Automation.PSCredential ($domainAdminUsername, $(ConvertTo-SecureString $safemodeAdminPassword -AsPlainText -Force))) -$commonDscParams = @{ - ArchiveContainerName = $config.storage.artifacts.containers.shmDesiredState - ArchiveResourceGroupName = $config.storage.artifacts.rg - ArchiveStorageAccountName = $config.storage.artifacts.accountName - VmLocation = $config.location - VmResourceGroupName = $config.dc.rg -} -# Fetch user and OU details -$userAccounts = $config.users.computerManagers + $config.users.serviceAccounts -foreach ($user in $userAccounts.Keys) { - $userAccounts[$user]["password"] = Resolve-KeyVaultSecret -VaultName $config.keyVault.name -SecretName $userAccounts[$user]["passwordSecretName"] -DefaultLength 20 -AsPlaintext -} -# DC1 -Add-LogMessage -Level Info "Installing desired state prerequisites on DC1..." 
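Several of the DSC parameters assembled below are passed as base64-encoded JSON (the *B64 arguments); ConvertTo-Base64 is a helper from the repository's common modules, and assuming it performs a plain UTF-8 base64 encoding, the round trip is equivalent to this sketch (payload values are illustrative only):
# Sketch only: encode a hashtable as base64(JSON) and decode it again on the receiving end
$examplePayload = @{ domainFqdn = "example.org"; ouCount = 3 }
$encoded = [Convert]::ToBase64String([Text.Encoding]::UTF8.GetBytes(($examplePayload | ConvertTo-Json -Depth 99)))
$decoded = [Text.Encoding]::UTF8.GetString([Convert]::FromBase64String($encoded)) | ConvertFrom-Json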
-$null = Invoke-RemoteScript -Shell "PowerShell" -ScriptPath (Join-Path $dscPath "DC1Bootstrap.ps1") -VMName $config.dc.vmName -ResourceGroupName $config.dc.rg -SuppressOutput -$params = @{ - ActiveDirectoryBasePath = $config.dc.adDirectory - AdministratorCredentials = $domainAdminCredentials - ArtifactsBlobNamesB64 = Get-AzStorageBlob -Container $config.storage.artifacts.containers.shmArtifactsDC -Context $storageAccount.Context | ForEach-Object { $_.Name } | ConvertTo-Json -Depth 99 | ConvertTo-Base64 - ArtifactsBlobSasTokenB64 = (New-ReadOnlyStorageAccountSasToken -SubscriptionName $config.subscriptionName -ResourceGroup $config.storage.artifacts.rg -AccountName $config.storage.artifacts.accountName) | ConvertTo-Base64 - ArtifactsStorageAccountName = $config.storage.artifacts.accountName - ArtifactsStorageContainerName = $config.storage.artifacts.containers.shmArtifactsDC - ArtifactsTargetDirectory = $config.dc.installationDirectory - DomainDn = $config.domain.dn - DomainFqdn = $config.domain.fqdn - DomainNetBiosName = $config.domain.netbiosName - DomainOusB64 = $config.domain.ous | ConvertTo-Json -Depth 99 | ConvertTo-Base64 - DomainSecurityGroupsB64 = $config.domain.securityGroups | ConvertTo-Json -Depth 99 | ConvertTo-Base64 - ExternalDnsResolver = $config.dc.external_dns_resolver - IdentitySubnetCidr = $config.network.vnet.subnets.identity.cidr - SafeModeCredentials = $safeModeCredentials - UserAccountsB64 = $userAccounts | ConvertTo-Json -Depth 99 | ConvertTo-Base64 -} -$null = Invoke-AzureVmDesiredState -ArchiveBlobName "DC1DesiredState.ps1.zip" ` - -ConfigurationName "ConfigurePrimaryDomainController" ` - -ConfigurationParameters $params ` - -VmName $config.dc.vmName ` - @commonDscParams -# DC2 -Add-LogMessage -Level Info "Installing desired state prerequisites on DC2..." -$null = Invoke-RemoteScript -Shell "PowerShell" -ScriptPath (Join-Path $dscPath "DC2Bootstrap.ps1") -VMName $config.dcb.vmName -ResourceGroupName $config.dc.rg -SuppressOutput -$params = @{ - ActiveDirectoryBasePath = $config.dc.adDirectory - AdministratorCredentials = $domainAdminCredentials - DomainFqdn = $config.domain.fqdn - PrimaryDomainControllerIp = $config.dc.ip - SafeModeCredentials = $safeModeCredentials -} -$null = Invoke-AzureVmDesiredState -ArchiveBlobName "DC2DesiredState.ps1.zip" ` - -ConfigurationName "ConfigureSecondaryDomainController" ` - -ConfigurationParameters $params ` - -VmName $config.dcb.vmName ` - @commonDscParams - - -# Set DNS servers for each network card then set locale and apply updates -# ----------------------------------------------------------------------- -foreach ($vmName in ($config.dc.vmName, $config.dcb.vmName)) { - # Remove custom per-NIC DNS settings - $networkCard = Get-AzNetworkInterface -ResourceGroupName $config.dc.rg -Name "${vmName}-NIC" - $networkCard.DnsSettings.DnsServers.Clear() - $null = $networkCard | Set-AzNetworkInterface - - # Set locale, install updates and reboot - Add-LogMessage -Level Info "Updating DC VM '$vmName'..." 
- Invoke-WindowsConfiguration -VMName $vmName -ResourceGroupName $config.dc.rg -TimeZone $config.time.timezone.windows -NtpServer ($config.time.ntp.serverFqdns)[0] -} - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/safe_haven_management_environment/setup/Setup_SHM_DNS_Zone.ps1 b/deployment/safe_haven_management_environment/setup/Setup_SHM_DNS_Zone.ps1 deleted file mode 100644 index 3eb7530449..0000000000 --- a/deployment/safe_haven_management_environment/setup/Setup_SHM_DNS_Zone.ps1 +++ /dev/null @@ -1,32 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureDns -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureResources -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-ShmConfig -shmId $shmId -$originalContext = Get-AzContext -$null = Set-AzContext -Subscription $config.dns.subscriptionName -ErrorAction Stop - - -# Ensure that DNS resource group exists -# ------------------------------------- -$null = Deploy-ResourceGroup -Name $config.dns.rg -Location $config.location - - -# Create the DNS Zone and set the parent NS records if required -# ------------------------------------------------------------- -Set-DnsZoneAndParentNSRecords -DnsZoneName $config.domain.fqdn -ResourceGroupName $config.dns.rg - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/safe_haven_management_environment/setup/Setup_SHM_Firewall.ps1 b/deployment/safe_haven_management_environment/setup/Setup_SHM_Firewall.ps1 deleted file mode 100644 index 62a8b62cd7..0000000000 --- a/deployment/safe_haven_management_environment/setup/Setup_SHM_Firewall.ps1 +++ /dev/null @@ -1,161 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 
'project')")] - [string]$shmId -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.Network -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureCompute -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureMonitor -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureNetwork -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureOperationalInsights -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureResources -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Templates -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-ShmConfig -shmId $shmId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.subscriptionName -ErrorAction Stop - - -# Create resource group if it does not exist -# ------------------------------------------ -$null = Deploy-ResourceGroup -Name $config.monitoring.rg -Location $config.location - - -# Ensure that firewall subnet exists -# ---------------------------------- -$vnetShm = Get-VirtualNetwork $config.network.vnet.name -ResourceGroupName $config.network.vnet.rg -$null = Deploy-Subnet -Name $config.network.vnet.subnets.firewall.name -VirtualNetwork $vnetShm -AddressPrefix $config.network.vnet.subnets.firewall.cidr - - -# Create the firewall with a public IP address -# NB. the firewall needs to be in the same resource group as the VNet -# NB. it is not possible to assign a private IP address to the firewall - it will take the first available one in the subnet -# -------------------------------------------------------------------------------------------------------------------------------- -$firewall = Deploy-Firewall -Name $config.firewall.name -ResourceGroupName $config.network.vnet.rg -Location $config.location -VirtualNetworkName $config.network.vnet.name - - -# Create the logging workspace if it does not already exist -# --------------------------------------------------------- -$workspace = Deploy-LogAnalyticsWorkspace -Name $config.monitoring.loggingWorkspace.name -ResourceGroupName $config.monitoring.rg -Location $config.location - - -# Enable logging for this firewall -# -------------------------------- -Set-LogAnalyticsDiagnostics -ResourceId $firewall.Id -ResourceName $firewall.Name -WorkspaceId $workspace.ResourceId - - -# Create or retrieve the route table. -# Note that we need to disable BGP propagation or VMs might be able to jump directly to the target without going through the firewall -# ----------------------------------------------------------------------------------------------------------------------------------- -$routeTable = Deploy-RouteTable -Name $config.firewall.routeTableName -ResourceGroupName $config.network.vnet.rg -Location $config.location - - -# Set firewall rules from template -# -------------------------------- -Add-LogMessage -Level Info "Setting firewall rules from template..." -$config.firewall["privateIpAddress"] = $firewall.IpConfigurations.PrivateIpAddress -$rules = Get-JsonFromMustacheTemplate -TemplatePath (Join-Path $PSScriptRoot ".." 
"network_rules" "shm-firewall-rules.json") -Parameters $config -AsHashtable -$ruleNameFilter = "shm-$($config.id)" - - -# Add routes to the route table -# We need to keep all routing symmetric, or it will be dropped by the firewall (see eg. https://azure.microsoft.com/en-gb/blog/accessing-virtual-machines-behind-azure-firewall-with-azure-bastion/). -# VPN gateway connections do not come via the firewall so they must return by the same route. -# All other requests should be routed via the firewall. -# Rules are applied by looking for the closest CIDR match first, so the general rule from 0.0.0.0/0 will always come last. -# ------------------------------------------------------------------------------------------------------------------------ -foreach ($route in $rules.routes) { - $null = Deploy-Route -Name $route.name -RouteTableName $config.firewall.routeTableName -AppliesTo $route.properties.addressPrefix -NextHop $route.properties.nextHop -} - - -# Attach all subnets except the VPN gateway and firewall subnets to the firewall route table -# ------------------------------------------------------------------------------------------ -$excludedSubnetNames = @($config.network.vnet.subnets.firewall.name, $config.network.vnet.subnets.gateway.name, $config.network.vnetRepositoriesTier2.subnets.deployment.name, $config.network.vnetRepositoriesTier3.subnets.deployment.name) -$vnetRepositoriesTier2 = Get-VirtualNetwork -Name $config.network.vnetRepositoriesTier2.name -ResourceGroupName $config.network.vnetRepositoriesTier2.rg -$vnetRepositoriesTier3 = Get-VirtualNetwork -Name $config.network.vnetRepositoriesTier3.name -ResourceGroupName $config.network.vnetRepositoriesTier3.rg -foreach ($vnet in @($vnetShm, $vnetRepositoriesTier2, $vnetRepositoriesTier3)) { - foreach ($subnet in $vnet.Subnets) { - if ($excludedSubnetNames.Contains($subnet.Name)) { - $vnet = Set-AzVirtualNetworkSubnetConfig -VirtualNetwork $vnet -Name $subnet.Name -AddressPrefix $subnet.AddressPrefix -RouteTable $null | Set-AzVirtualNetwork - } else { - $vnet = Set-AzVirtualNetworkSubnetConfig -VirtualNetwork $vnet -Name $subnet.Name -AddressPrefix $subnet.AddressPrefix -RouteTable $routeTable | Set-AzVirtualNetwork - } - } -} - - -# Application rules -# ----------------- -foreach ($ruleCollectionName in $firewall.ApplicationRuleCollections | Where-Object { $_.Name -like "$ruleNameFilter*" } | ForEach-Object { $_.Name }) { - $null = $firewall.RemoveApplicationRuleCollectionByName($ruleCollectionName) - Add-LogMessage -Level Info "Removed existing '$ruleCollectionName' application rule collection." -} -foreach ($ruleCollection in $rules.applicationRuleCollections) { - Add-LogMessage -Level Info "Setting rules for application rule collection '$($ruleCollection.name)'..." - foreach ($rule in $ruleCollection.properties.rules) { - $params = @{} - if ($rule.fqdnTags) { $params["TargetTag"] = $rule.fqdnTags } - if ($rule.protocols) { $params["Protocol"] = $rule.protocols } - if ($rule.targetFqdns) { $params["TargetFqdn"] = $rule.targetFqdns } - $firewall = Deploy-FirewallApplicationRule -Name $rule.name -CollectionName $ruleCollection.name -Firewall $firewall -SourceAddress $rule.sourceAddresses -Priority $ruleCollection.properties.priority -ActionType $ruleCollection.properties.action.type @params -LocalChangeOnly - } -} -if (-not $rules.applicationRuleCollections) { - Add-LogMessage -Level Warning "No application rules specified." 
-} - - -# Network rules -# ------------- -foreach ($ruleCollectionName in $firewall.NetworkRuleCollections | Where-Object { $_.Name -like "$ruleNameFilter*" } | ForEach-Object { $_.Name }) { - $null = $firewall.RemoveNetworkRuleCollectionByName($ruleCollectionName) - Add-LogMessage -Level Info "Removed existing '$ruleCollectionName' network rule collection." -} -Add-LogMessage -Level Info "Setting firewall network rules..." -foreach ($ruleCollection in $rules.networkRuleCollections) { - Add-LogMessage -Level Info "Setting rules for network rule collection '$($ruleCollection.name)'..." - foreach ($rule in $ruleCollection.properties.rules) { - $params = @{} - if ($rule.protocols) { - $params["Protocol"] = @($rule.protocols | ForEach-Object { $_.Split(":")[0] }) - $params["DestinationPort"] = @($rule.protocols | ForEach-Object { $_.Split(":")[1] }) - } - if ($rule.targetAddresses) { $params["DestinationAddress"] = $rule.targetAddresses } - if ($rule.targetFqdns) { $params["DestinationFqdn"] = $rule.targetFqdns } - $null = Deploy-FirewallNetworkRule -Name $rule.name -CollectionName $ruleCollection.name -Firewall $firewall -SourceAddress $rule.sourceAddresses -Priority $ruleCollection.properties.priority -ActionType $ruleCollection.properties.action.type @params -LocalChangeOnly - } -} -if (-not $rules.networkRuleCollections) { - Add-LogMessage -Level Warning "No network rules specified." -} - - -# Update remote firewall with rule changes -# ---------------------------------------- -Add-LogMessage -Level Info "[ ] Updating remote firewall with rule changes..." -$firewall = Set-AzFirewall -AzureFirewall $firewall -ErrorAction Stop -Add-LogMessage -Level Success "Updated remote firewall with rule changes." - - -# Restart domain controllers if they are running -# -------------------------------------------------- -# This ensures that they establish a new SSPR connection through the firewall in case -# it was previously blocked due to incorrect firewall rules or a deallocated firewall -if (Confirm-VmRunning -Name $config.dc.vmName -ResourceGroupName $config.dc.rg) { - Start-VM -Name $config.dc.vmName -ResourceGroupName $config.dc.rg -ForceRestart -} -if (Confirm-VmRunning -Name $config.dcb.vmName -ResourceGroupName $config.dc.rg) { - Start-VM -Name $config.dcb.vmName -ResourceGroupName $config.dc.rg -ForceRestart -} - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/safe_haven_management_environment/setup/Setup_SHM_Key_Vault_And_Emergency_Admin.ps1 b/deployment/safe_haven_management_environment/setup/Setup_SHM_Key_Vault_And_Emergency_Admin.ps1 deleted file mode 100644 index 0dc9e330e7..0000000000 --- a/deployment/safe_haven_management_environment/setup/Setup_SHM_Key_Vault_And_Emergency_Admin.ps1 +++ /dev/null @@ -1,355 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 
'project')")] - [string]$shmId -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.KeyVault -ErrorAction Stop -Import-Module Microsoft.Graph.Authentication -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureKeyVault -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureResources -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Cryptography -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-ShmConfig -shmId $shmId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.subscriptionName -ErrorAction Stop - - -# Connect to Microsoft Graph -# -------------------------- -if (-not (Get-MgContext)) { - Add-LogMessage -Level Info "Attempting to authenticate with Microsoft Graph. Please sign in with an account with admin rights over the Azure Active Directory you plan to use." - Connect-MgGraph -TenantId $config.azureAdTenantId -Scopes "User.ReadWrite.All", "UserAuthenticationMethod.ReadWrite.All", "Directory.AccessAsUser.All", "RoleManagement.ReadWrite.Directory" -ErrorAction Stop -ContextScope Process - if (Get-MgContext) { - Add-LogMessage -Level Success "Authenticated with Microsoft Graph" - } else { - Add-LogMessage -Level Fatal "Failed to authenticate with Microsoft Graph" - } -} - - -# Create secrets resource group if it does not exist -# -------------------------------------------------- -$null = Deploy-ResourceGroup -Name $config.keyVault.rg -Location $config.location - - -# Ensure the Key Vault exists and set its access policies -# ------------------------------------------------------- -$null = Deploy-KeyVault -Name $config.keyVault.name -ResourceGroupName $config.keyVault.rg -Location $config.location -Set-KeyVaultPermissions -Name $config.keyVault.name -GroupName $config.azureAdminGroupName - - -# Ensure that secrets exist in the Key Vault -# ------------------------------------------ -Add-LogMessage -Level Info "Ensuring that secrets exist in Key Vault '$($config.keyVault.name)'..." - -# :: AAD Emergency Administrator username -$null = Resolve-KeyVaultSecret -VaultName $config.keyVault.name -SecretName $config.keyVault.secretNames.aadEmergencyAdminUsername -DefaultValue "aad.admin.emergency.access" -AsPlaintext -if ($?) { - Add-LogMessage -Level Success "AAD emergency administrator account username exists" -} else { - Add-LogMessage -Level Fatal "Failed to create AAD Emergency Global Administrator username!" -} - -# :: AAD Emergency Administrator password -$null = Resolve-KeyVaultSecret -VaultName $config.keyVault.Name -SecretName $config.keyVault.secretNames.aadEmergencyAdminPassword -DefaultLength 20 -AsPlaintext -if ($?) { - Add-LogMessage -Level Success "AAD emergency administrator account password exists" -} else { - Add-LogMessage -Level Fatal "Failed to create AAD Emergency Global Administrator password!" 
-} - -# :: Admin usernames -try { - $null = Resolve-KeyVaultSecret -VaultName $config.keyVault.name -SecretName $config.keyVault.secretNames.domainAdminUsername -DefaultValue "domain$($config.id)admin".ToLower() -AsPlaintext - $null = Resolve-KeyVaultSecret -VaultName $config.keyVault.name -SecretName $config.keyVault.secretNames.vmAdminUsername -DefaultValue "shm$($config.id)admin".ToLower() -AsPlaintext - $null = Resolve-KeyVaultSecret -VaultName $config.keyVault.name -SecretName $config.users.serviceAccounts.aadLocalSync.usernameSecretName -DefaultValue $config.users.serviceAccounts.aadLocalSync.samAccountName -AsPlaintext - Add-LogMessage -Level Success "Ensured that SHM admin usernames exist" -} catch { - Add-LogMessage -Level Fatal "Failed to ensure that SHM admin usernames exist!" -Exception $_.Exception -} -# :: VM admin passwords -try { - $null = Resolve-KeyVaultSecret -VaultName $config.keyVault.name -SecretName $config.keyVault.secretNames.domainAdminPassword -DefaultLength 20 -AsPlaintext - $null = Resolve-KeyVaultSecret -VaultName $config.keyVault.name -SecretName $config.dc.safemodePasswordSecretName -DefaultLength 20 -AsPlaintext - $null = Resolve-KeyVaultSecret -VaultName $config.keyVault.name -SecretName $config.monitoring.updateServers.linux.adminPasswordSecretName -DefaultLength 20 -AsPlaintext - foreach ($repositoryTier in $config.repository.Keys) { - $null = Resolve-KeyVaultSecret -VaultName $config.keyVault.name -SecretName $config.repository[$repositoryTier].nexus.adminPasswordSecretName -DefaultLength 20 -AsPlaintext - } - foreach ($mirrorType in $config.mirrors.Keys) { - foreach ($mirrorTier in $config.mirrors[$mirrorType].Keys) { - foreach ($mirrorDirection in $config.mirrors[$mirrorType][$mirrorTier].Keys) { - $adminPasswordSecretName = $config.mirrors[$mirrorType][$mirrorTier][$mirrorDirection].adminPasswordSecretName - if ($adminPasswordSecretName) { $null = Resolve-KeyVaultSecret -VaultName $config.keyVault.name -SecretName $adminPasswordSecretName -DefaultLength 20 -AsPlaintext } - } - } - } - Add-LogMessage -Level Success "Ensured that SHM VM admin passwords exist" -} catch { - Add-LogMessage -Level Fatal "Failed to ensure that SHM VM admin passwords exist!" -Exception $_.Exception -} -# :: Computer manager users -try { - $computerManagers = $config.users.computerManagers - foreach ($user in $computerManagers.Keys) { - $null = Resolve-KeyVaultSecret -VaultName $config.keyVault.name -SecretName $computerManagers[$user]["passwordSecretName"] -DefaultLength 20 -AsPlaintext - } - Add-LogMessage -Level Success "Ensured that domain joining passwords exist" -} catch { - Add-LogMessage -Level Fatal "Failed to ensure that domain joining passwords exist!" -Exception $_.Exception -} -# :: Service accounts -try { - $serviceAccounts = $config.users.serviceAccounts - foreach ($user in $serviceAccounts.Keys) { - $null = Resolve-KeyVaultSecret -VaultName $config.keyVault.name -SecretName $serviceAccounts[$user]["passwordSecretName"] -DefaultLength 20 -AsPlaintext - } - Add-LogMessage -Level Success "Ensured that service account passwords exist" -} catch { - Add-LogMessage -Level Fatal "Failed to ensure that service account passwords exist!" 
-Exception $_.Exception -} - - -# Set Emergency Admin user properties -# ----------------------------------- -$username = Resolve-KeyVaultSecret -VaultName $config.keyVault.Name -SecretName $config.keyVault.secretNames.aadEmergencyAdminUsername -AsPlaintext -$userPrincipalName = "$username@$($config.domain.fqdn)" -$params = @{ - MailNickName = $username - DisplayName = "AAD Admin - EMERGENCY ACCESS" - PasswordProfile = @{ - Password = Resolve-KeyVaultSecret -VaultName $config.keyVault.Name -SecretName $config.keyVault.secretNames.aadEmergencyAdminPassword -AsPlaintext - ForceChangePasswordNextSignInWithMfa = $false - ForceChangePasswordNextSignIn = $false - } - UserType = "Member" - AccountEnabled = $true - PasswordPolicies = "DisablePasswordExpiration" - UsageLocation = $config.organisation.countryCode -} - - -# Ensure emergency admin user exists -# ---------------------------------- -Add-LogMessage -Level Info "Ensuring AAD emergency administrator account exists..." -$globalAdminUser = Get-MgUser | Where-Object { $_.UserPrincipalName -eq $userPrincipalName } -if ($globalAdminUser) { - # Update existing user - $globalAdminUser = Update-MgUser -UserId $globalAdminUser.Id @params - if ($?) { - Add-LogMessage -Level Success "Existing AAD emergency administrator account updated." - } else { - Add-LogMessage -Level Fatal "Failed to update existing AAD emergency administrator account!" - } -} else { - # Create new user - $globalAdminUser = New-MgUser -UserPrincipalName $userPrincipalName @params - if ($?) { - Add-LogMessage -Level Success "AAD emergency administrator account created." - } else { - Add-LogMessage -Level Fatal "Failed to create AAD emergency administrator account!" - } -} - -# Ensure emergency admin account has full administrator rights -# ------------------------------------------------------------ -$globalAdminRoleName = "Global Administrator" -Add-LogMessage -Level Info "Ensuring AAD emergency administrator has '$globalAdminRoleName' role..." -$globalAdminRole = Get-MgDirectoryRole | Where-Object { $_.DisplayName -eq $globalAdminRoleName } -# If role instance does not exist, instantiate it based on the role template -if ($null -eq $globalAdminRole) { - Add-LogMessage -Level Info "'$globalAdminRoleName' does not exist. Creating role from template..." - # Instantiate an instance of the role template - $globalAdminRoleTemplate = Get-MgDirectoryRoleTemplate | Where-Object { $_.DisplayName -eq $globalAdminRoleName } - New-MgDirectoryRole -RoleTemplateId $globalAdminRoleTemplate.Id - if ($?) { - Add-LogMessage -Level Success "'$globalAdminRoleName' created from template." - } else { - Add-LogMessage -Level Fatal "Failed to create '$globalAdminRoleName' from template!" - } - # Fetch role instance again - $globalAdminRole = Get-MgDirectoryRole | Where-Object { $_.DisplayName -eq $globalAdminRoleName } -} -# Ensure user is assigned to the role -$globalAdminUser = Get-MgUser | Where-Object { $_.UserPrincipalName -eq $userPrincipalName } -$userHasRole = Get-MgDirectoryRoleMember -DirectoryRoleId $globalAdminRole.Id | Where-Object { $_.Id -eq $globalAdminUser.Id } -if ($userHasRole) { - Add-LogMessage -Level Success "AAD emergency administrator already has '$globalAdminRoleName' role." 
-} else { - $null = New-MgDirectoryRoleMemberByRef -DirectoryRoleId $globalAdminRole.Id -BodyParameter @{"@odata.id" = "https://graph.microsoft.com/v1.0/directoryObjects/$($globalAdminUser.Id)" } - $userHasRole = Get-MgDirectoryRoleMember -DirectoryRoleId $globalAdminRole.Id | Where-Object { $_.Id -eq $globalAdminUser.Id } - if ($userHasRole) { - Add-LogMessage -Level Success "Granted AAD emergency administrator '$globalAdminRoleName' role." - } else { - Add-LogMessage -Level Failure "Failed to grant AAD emergency administrator '$globalAdminRoleName' role!" - } -} - - -# Ensure that certificates exist -# ------------------------------ -try { - # Certificate validities - $caValidityMonths = 27 # The CAB standard now limits certificates to 825 days - $caValidityDays = (Get-Date | ForEach-Object { $_.AddMonths($caValidityMonths) - $_ }).Days - $clientValidityMonths = 24 # 2 years - $clientValidityDays = (Get-Date | ForEach-Object { $_.AddMonths($clientValidityMonths) - $_ }).Days - - # Generate all certificates in a single folder for easier cleanup - $certFolderPath = (New-Item -ItemType "directory" -Path "$((New-TemporaryFile).FullName).certificates").FullName - $caStem = "SHM-$($config.id)-P2S-CA".ToUpper() - $caCrtPath = Join-Path $certFolderPath "${caStem}.crt" - $caKeyPath = Join-Path $certFolderPath "${caStem}.key" - $caPfxPath = Join-Path $certFolderPath "${caStem}.pfx" - $clientStem = "SHM-$($config.id)-P2S-CLIENT".ToUpper() - $clientCrtPath = Join-Path $certFolderPath "${clientStem}.crt" - $clientCsrPath = Join-Path $certFolderPath "${clientStem}.csr" - $clientPkcs7Path = Join-Path $certFolderPath "${clientStem}.p7b" - - # Ensure that CA certificate exists in the Key Vault - # -------------------------------------------------- - Add-LogMessage -Level Info "Ensuring that self-signed CA certificate exists in the '$($config.keyVault.name)' Key Vault..." - # Check whether CA certificate exists - $vpnCaCertificate = Resolve-KeyVaultPrivateKeyCertificate -VaultName $config.keyVault.name -CertificateName $config.keyVault.secretNames.vpnCaCertificate - if (-not $vpnCaCertificate) { - Add-LogMessage -Level Info "Creating new self-signed CA certificate..." - - # Create self-signed CA certificate with private key - # -------------------------------------------------- - Add-LogMessage -Level Info "[ ] Generating self-signed certificate locally" - $vpnCaCertPassword = Resolve-KeyVaultSecret -VaultName $config.keyVault.name -SecretName $config.keyVault.secretNames.vpnCaCertPassword -DefaultLength 20 -AsPlaintext - openssl req -subj "/CN=$caStem" -new -newkey rsa:2048 -sha256 -days $caValidityDays -nodes -x509 -keyout $caKeyPath -out $caCrtPath - openssl pkcs12 -in $caCrtPath -inkey $caKeyPath -export -out $caPfxPath -password "pass:$vpnCaCertPassword" - if ($?) { - Add-LogMessage -Level Success "Generating self-signed certificate succeeded" - } else { - Add-LogMessage -Level Fatal "Generating self-signed certificate failed!" - } - - # Upload the CA key + cert bundle to the Key Vault - # ------------------------------------------------ - Add-LogMessage -Level Info "[ ] Uploading CA private key + certificate bundle as certificate $($config.keyVault.secretNames.vpnCaCertificate) (includes private key)" - $null = Import-AzKeyVaultCertificate -VaultName $config.keyVault.name -Name $config.keyvault.secretNames.vpnCaCertificate -FilePath $caPfxPath -Password (ConvertTo-SecureString $vpnCaCertPassword -AsPlainText -Force); - if ($?) 
{ - Add-LogMessage -Level Success "Uploading the full CA certificate succeeded" - } else { - Add-LogMessage -Level Fatal "Uploading the full CA certificate failed!" - } - - # # NB. this is not working at present - OSX reports that the CA certificate "is not standards compliant" - # # Generate a self-signed CA certificate in the Key Vault - # # ------------------------------------------------------ - # Add-LogMessage -Level Info "[ ] Generating self-signed certificate in the '$($config.keyVault.name)' Key Vault" - # $caPolicy = New-AzKeyVaultCertificatePolicy -SecretContentType "application/x-pkcs12" -KeyType "RSA" -KeyUsage @("KeyCertSign", "CrlSign") -Ekus @("1.3.6.1.5.5.7.3.1", "1.3.6.1.5.5.7.3.2", "2.5.29.37.0") -KeySize 2048 -SubjectName "CN=$caStem" -ValidityInMonths $caValidityMonths -IssuerName "Self" - # $caPolicy.Exportable = $true - # $certificateOperation = Add-AzKeyVaultCertificate -VaultName $config.keyVault.name -Name $config.keyVault.secretNames.vpnCaCertificate -CertificatePolicy $caPolicy - # while ($status -ne "completed") { - # $status = (Get-AzKeyVaultCertificateOperation -VaultName $config.keyVault.name -Name $config.keyVault.secretNames.vpnCaCertificate).Status - # $progress = [math]::min(100, $progress + 9) - # Write-Progress -Activity "Certificate creation:" -Status $status -PercentComplete $progress - # Start-Sleep 1 - # } - # if ($?) { - # Add-LogMessage -Level Success "Generating self-signed certificate succeeded" - # } else { - # Add-LogMessage -Level Fatal "Generating self-signed certificate failed!" - # } - - # Store plain CA certificate as a Key Vault secret - # ------------------------------------------------ - Add-LogMessage -Level Info "[ ] Uploading the plain CA certificate as secret $($config.keyVault.secretNames.vpnCaCertificatePlain) (without private key)" - $vpnCaCertificate = (Get-AzKeyVaultCertificate -VaultName $config.keyVault.name -Name $config.keyVault.secretNames.vpnCaCertificate).Certificate - # Extract the public certificate and encode it as a Base64 string, without the header and footer lines and with a space every 64 characters - $vpnCaCertificateB64String = [System.Convert]::ToBase64String($vpnCaCertificate.Export([System.Security.Cryptography.X509Certificates.X509ContentType]::Cert)) - $vpnCaCertificatePlain = ($vpnCaCertificateB64String -split '(.{64})' | Where-Object { $_ }) -join " " - $null = Set-AzKeyVaultSecret -VaultName $config.keyVault.name -Name $config.keyVault.secretNames.vpnCaCertificatePlain -SecretValue (ConvertTo-SecureString "$vpnCaCertificatePlain" -AsPlainText -Force) - if ($?) { - Add-LogMessage -Level Success "Uploading the plain CA certificate succeeded" - } else { - Add-LogMessage -Level Fatal "Uploading the plain CA certificate failed!" - } - } - - # Generate or retrieve client certificate - # --------------------------------------- - Add-LogMessage -Level Info "Ensuring that client certificate exists in the '$($config.keyVault.name)' Key Vault..." - # Check whether client certificate exists - $vpnClientCertificate = Resolve-KeyVaultPrivateKeyCertificate -VaultName $config.keyVault.name -CertificateName $config.keyVault.secretNames.vpnClientCertificate - if (-not $vpnClientCertificate) { - Add-LogMessage -Level Info "Creating new client certificate..." - - # Load CA certificate into local PFX file and extract the private key - # ------------------------------------------------------------------- - Add-LogMessage -Level Info "[ ] Loading CA private key from Key Vault..." 
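# Note on the consistency check performed below: an RSA private key and the certificate
# issued from it share the same modulus, so hashing the modulus of each with openssl and
# comparing the digests confirms that the retrieved key and certificate belong together.
# A minimal standalone sketch of the same idiom, assuming openssl is on the PATH and
# using placeholder file names rather than the paths defined above:
#   $keyDigest = openssl rsa  -noout -modulus -in "example-ca.key" | openssl md5
#   $crtDigest = openssl x509 -noout -modulus -in "example-ca.crt" | openssl md5
#   if ($keyDigest -eq $crtDigest) { Write-Output "Private key matches certificate" }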
- $caPfxBase64 = Resolve-KeyVaultSecret -VaultName $config.keyVault.name -SecretName $config.keyVault.secretNames.vpnCaCertificate -AsPlaintext - [IO.File]::WriteAllBytes($caPfxPath, [System.Convert]::FromBase64String($caPfxBase64)) - $caKeyData = openssl pkcs12 -in $caPfxPath -nocerts -nodes -passin pass: - $caKeyData.Where( { $_ -like "-----BEGIN PRIVATE KEY-----" }, 'SkipUntil') | Out-File -FilePath $caKeyPath - $caKeyMD5 = openssl rsa -noout -modulus -in $caKeyPath | openssl md5 - if ($?) { - Add-LogMessage -Level Success "Loading CA private key succeeded" - } else { - Add-LogMessage -Level Fatal "Loading CA private key failed!" - } - - # Split CA certificate into key and certificate - # --------------------------------------------- - Add-LogMessage -Level Info "[ ] Retrieving CA plain certificate..." - # Write CA certificate to a file after stripping headers and reflowing to a maximum of 64 characters per line - $vpnCaCertificatePlain = Resolve-KeyVaultSecret -VaultName $config.keyVault.name -SecretName $config.keyVault.secretNames.vpnCaCertificatePlain -AsPlaintext - "-----BEGIN CERTIFICATE-----" | Out-File -FilePath $caCrtPath - $vpnCaCertificatePlain.Replace(" ", "") -split '(.{64})' | Where-Object { $_ } | Out-File -Append -FilePath $caCrtPath - "-----END CERTIFICATE-----" | Out-File -Append -FilePath $caCrtPath - $caCrtMD5 = openssl x509 -noout -modulus -in $caCrtPath | openssl md5 - if ($caKeyMD5 -eq $caCrtMD5) { - Add-LogMessage -Level Success "Validated CA certificate retrieval using MD5" - } else { - Add-LogMessage -Level Fatal "Failed to validate CA certificate retrieval using MD5!" - } - - # Generate a CSR in the Key Vault - # ------------------------------- - Add-LogMessage -Level Info "[ ] Creating new certificate signing request to be signed by the CA certificate..." - if ($status -ne "inProgress") { - $clientPolicy = New-AzKeyVaultCertificatePolicy -SubjectName "CN=$clientStem" -ValidityInMonths $clientValidityMonths -IssuerName "Unknown" - $null = Add-AzKeyVaultCertificate -VaultName $config.keyVault.name -Name $config.keyVault.secretNames.vpnClientCertificate -CertificatePolicy $clientPolicy - } - $certificateOperation = Get-AzKeyVaultCertificateOperation -VaultName $config.keyVault.name -Name $config.keyVault.secretNames.vpnClientCertificate - $success = $? - # Write the CSR after reflowing to a maximum of 64 characters per line - "-----BEGIN CERTIFICATE REQUEST-----" | Out-File -FilePath $clientCsrPath - $certificateOperation.CertificateSigningRequest -split '(.{64})' | Where-Object { $_ } | Out-File -Append -FilePath $clientCsrPath - "-----END CERTIFICATE REQUEST-----" | Out-File -Append -FilePath $clientCsrPath - if ($success) { - Add-LogMessage -Level Success "CSR creation succeeded" - } else { - Add-LogMessage -Level Fatal "CSR creation failed!" - } - - # Sign the client certificate - create a PKCS#7 file from full certificate chain and merge it with the private key - # ---------------------------------------------------------------------------------------------------------------- - Add-LogMessage -Level Info "[ ] Signing the CSR and merging into the '$($config.keyVault.secretNames.vpnClientCertificate)' certificate..." 
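# The signing step below follows a standard openssl pattern: the CSR produced by the
# pending Key Vault certificate operation is signed with the locally reconstructed CA
# key and certificate, the signed certificate plus CA certificate are wrapped as PKCS#7,
# and the bundle is imported back into Key Vault to be merged with the private key that
# never left the vault. A minimal sketch of the openssl calls, assuming openssl is on
# the PATH and using placeholder file names and validity rather than the values above:
#   openssl x509 -req -in "client.csr" -CA "ca.crt" -CAkey "ca.key" -CAcreateserial -out "client.crt" -days 730 -sha256
#   openssl crl2pkcs7 -nocrl -certfile "client.crt" -certfile "ca.crt" -out "client.p7b"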
- $vpnClientCertPassword = Resolve-KeyVaultSecret -VaultName $config.keyVault.name -SecretName $config.keyVault.secretNames.vpnClientCertPassword -DefaultLength 20 -AsPlaintext - openssl x509 -req -in $clientCsrPath -CA $caCrtPath -CAkey $caKeyPath -CAcreateserial -out $clientCrtPath -days $clientValidityDays -sha256 - openssl crl2pkcs7 -nocrl -certfile $clientCrtPath -certfile $caCrtPath -out $clientPkcs7Path 2>&1 | Out-Null - $null = Import-AzKeyVaultCertificate -VaultName $config.keyVault.name -Name $config.keyVault.secretNames.vpnClientCertificate -FilePath $clientPkcs7Path -Password (ConvertTo-SecureString "$vpnClientCertPassword" -AsPlainText -Force) - if ($?) { - Add-LogMessage -Level Success "Importing the signed client certificate succeeded" - } else { - Add-LogMessage -Level Fatal "Importing the signed client certificate failed!" - } - } -} finally { - # Delete local copies of certificates and private keys - Get-ChildItem $certFolderPath -Recurse | Remove-Item -Recurse -} - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/safe_haven_management_environment/setup/Setup_SHM_Monitoring.ps1 b/deployment/safe_haven_management_environment/setup/Setup_SHM_Monitoring.ps1 deleted file mode 100644 index 5b1b1b5324..0000000000 --- a/deployment/safe_haven_management_environment/setup/Setup_SHM_Monitoring.ps1 +++ /dev/null @@ -1,322 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.Compute -ErrorAction Stop -Import-Module Az.OperationalInsights -ErrorAction Stop -Import-Module Az.Resources -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureAutomation -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureCompute -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureMonitor -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureNetwork -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureOperationalInsights -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzurePrivateDns -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureResources -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-ShmConfig -shmId $shmId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.subscriptionName -ErrorAction Stop - - -# Create resource group if it does not exist -# ------------------------------------------ -$null = Deploy-ResourceGroup -Name $config.monitoring.rg -Location $config.location - - -# Deploy log analytics workspace -# ------------------------------ -$workspace = Deploy-LogAnalyticsWorkspace -Name $config.monitoring.loggingWorkspace.name -ResourceGroupName $config.monitoring.rg -Location $config.location - - -# Deploy automation account and connect to log analytics -# ------------------------------------------------------ -$account = Deploy-AutomationAccount -Name $config.monitoring.automationAccount.name -ResourceGroupName $config.monitoring.rg -Location $config.location -$null = Connect-AutomationAccountLogAnalytics 
-AutomationAccountName $account.AutomationAccountName -LogAnalyticsWorkspace $workspace - - -# Connect log analytics workspace to private link scope -# Note that we cannot connect a private endpoint directly to a log analytics workspace -# ------------------------------------------------------------------------------------ -$logAnalyticsLink = Deploy-MonitorPrivateLinkScope -Name $config.monitoring.privatelink.name -ResourceGroupName $config.monitoring.rg -$null = Connect-PrivateLinkToLogWorkspace -LogAnalyticsWorkspace $workspace -PrivateLinkScope $logAnalyticsLink - - -# Create private endpoints for the automation account and log analytics link -# -------------------------------------------------------------------------- -$monitoringSubnet = Get-Subnet -Name $config.network.vnet.subnets.monitoring.name -VirtualNetworkName $config.network.vnet.name -ResourceGroupName $config.network.vnet.rg -$accountEndpoint = Deploy-AutomationAccountEndpoint -Account $account -Subnet $monitoringSubnet -$logAnalyticsEndpoint = Deploy-MonitorPrivateLinkScopeEndpoint -PrivateLinkScope $logAnalyticsLink -Subnet $monitoringSubnet -Location $config.location - - -# Create private DNS records for each endpoint DNS entry -# ------------------------------------------------------ -$DnsConfigs = $accountEndpoint.CustomDnsConfigs + $logAnalyticsEndpoint.CustomDnsConfigs -# Only these exact domains are available as privatelink.{domain} through Azure Private DNS -# See https://docs.microsoft.com/en-us/azure/private-link/private-endpoint-dns -$PrivateLinkDomains = @( - "agentsvc.azure-automation.net", - "azure-automation.net", # note this must come after 'agentsvc.azure-automation.net' - "blob.core.windows.net", - "monitor.azure.com", - "ods.opinsights.azure.com", - "oms.opinsights.azure.com" -) -foreach ($DnsConfig in $DnsConfigs) { - $BaseDomain = $PrivateLinkDomains | Where-Object { $DnsConfig.Fqdn.Endswith($_) } | Select-Object -First 1 # we want the first (most specific) match - if ($BaseDomain) { - $privateZone = Deploy-PrivateDnsZone -Name "privatelink.${BaseDomain}" -ResourceGroup $config.network.vnet.rg - $recordName = $DnsConfig.Fqdn.Substring(0, $DnsConfig.Fqdn.IndexOf($BaseDomain) - 1) - $null = Deploy-PrivateDnsRecordSet -Name $recordName -ZoneName $privateZone.Name -ResourceGroupName $privateZone.ResourceGroupName -PrivateIpAddresses $DnsConfig.IpAddresses -Ttl 10 - # Connect the private DNS zones to all virtual networks in the SHM - # Note that this must be done before connecting the VMs to log analytics to ensure that they use the private link - foreach ($virtualNetwork in Get-VirtualNetwork -ResourceGroupName $config.network.vnet.rg) { - $null = Connect-PrivateDnsToVirtualNetwork -DnsZone $privateZone -VirtualNetwork $virtualNetwork - } - } else { - Add-LogMessage -Level Fatal "No zone created for '$($DnsConfig.Fqdn)'!" 
- } -} - - -# Schedule updates for all connected VMs -# -------------------------------------- -$null = Deploy-LogAnalyticsSolution -Workspace $workspace -SolutionType "Updates" -$shmQuery = Deploy-AutomationAzureQuery -Account $account -ResourceGroups (Get-ShmResourceGroups -shmConfig $config) -$localTimeZone = Get-TimeZone -Id $config.time.timezone.linux -# Create Windows VM virus definitions update schedule -$windowsDailySchedule = Deploy-AutomationScheduleInDays -Account $account ` - -Name "shm-$($config.id)-windows-definitions".ToLower() ` - -Time "$($config.monitoring.updateServers.schedule.daily_definition_updates.hour):$($config.monitoring.updateServers.schedule.daily_definition_updates.minute)" ` - -TimeZone $localTimeZone -$null = Register-VmsWithAutomationSchedule -Account $account ` - -DurationHours 1 ` - -IncludedUpdateCategories @("Definition") ` - -Query $shmQuery ` - -Schedule $windowsDailySchedule ` - -VmType "Windows" -# Create Windows VM other updates schedule -$windowsWeeklySchedule = Deploy-AutomationScheduleInDays -Account $account ` - -DayInterval 7 ` - -Name "shm-$($config.id)-windows-updates".ToLower() ` - -StartDayOfWeek $config.monitoring.updateServers.schedule.weekly_system_updates.day ` - -Time "$($config.monitoring.updateServers.schedule.weekly_system_updates.hour):$($config.monitoring.updateServers.schedule.weekly_system_updates.minute)" ` - -TimeZone $localTimeZone -$null = Register-VmsWithAutomationSchedule -Account $account ` - -DurationHours 3 ` - -IncludedUpdateCategories @("Critical", "FeaturePack", "Security", "ServicePack", "Tools", "Unclassified", "UpdateRollup", "Updates") ` - -Query $shmQuery ` - -Schedule $windowsWeeklySchedule ` - -VmType "Windows" -# Create Linux VM update schedule -$linuxWeeklySchedule = Deploy-AutomationScheduleInDays -Account $account ` - -DayInterval 7 ` - -Name "shm-$($config.id)-linux-updates".ToLower() ` - -StartDayOfWeek $config.monitoring.updateServers.schedule.weekly_system_updates.day ` - -Time "$($config.monitoring.updateServers.schedule.weekly_system_updates.hour):$($config.monitoring.updateServers.schedule.weekly_system_updates.minute)" ` - -TimeZone $localTimeZone -$null = Register-VmsWithAutomationSchedule -Account $account ` - -DurationHours 3 ` - -IncludedUpdateCategories @("Critical", "Other", "Security", "Unclassified") ` - -Query $shmQuery ` - -Schedule $linuxWeeklySchedule ` - -VmType "Linux" - - -# Enable the collection of syslog logs from Linux hosts -# ----------------------------------------------------- -$null = Enable-AzOperationalInsightsLinuxSyslogCollection -ResourceGroupName $workspace.ResourceGroupName -WorkspaceName $workspace.Name -# Delete all existing syslog sources -$sources = Get-AzOperationalInsightsDataSource -ResourceGroupName $workspace.ResourceGroupName -WorkspaceName $workspace.Name -Kind 'LinuxSysLog' -foreach ($source in $sources) { - $null = Remove-AzOperationalInsightsDataSource -ResourceGroupName $workspace.ResourceGroupName -WorkspaceName $workspace.Name -Name $source.Name -Force -} -# Syslog facilities: -# See -# - https://wiki.gentoo.org/wiki/Rsyslog#Facility -# - https://tools.ietf.org/html/rfc5424 (page 10) -# - https://rsyslog.readthedocs.io/en/latest/configuration/filters.html -$facilities = @{ - "auth" = "security/authorization messages"; - "authpriv" = "non-system authorization messages"; - "cron" = "clock daemon"; - "daemon" = "system daemons"; - "ftp" = "FTP daemon"; - "kern" = "kernel messages"; - "lpr" = "line printer subsystem"; - "mail" = "mail system"; - "news" = 
"network news subsystem"; - "syslog" = "messages generated internally by syslogd"; - "user" = "user-level messages"; - "uucp" = "UUCP subsystem"; -} -# Syslog severities: -# See -# - https://wiki.gentoo.org/wiki/Rsyslog#Severity -# - https://tools.ietf.org/html/rfc5424 (page 11) -# -# Emergency: system is unusable -# Alert: action must be taken immediately -# Critical: critical conditions -# Error: error conditions -# Warning: warning conditions -# Notice: normal but significant condition -# Informational: informational messages -# Debug: debug-level messages -foreach ($facility in $facilities.GetEnumerator()) { - $null = New-AzOperationalInsightsLinuxSyslogDataSource -CollectAlert ` - -CollectCritical ` - -CollectDebug ` - -CollectEmergency ` - -CollectError ` - -CollectInformational ` - -CollectNotice ` - -CollectWarning ` - -Facility $facility.Key ` - -Force ` - -Name "Linux-syslog-$($facility.Key)" ` - -ResourceGroupName $workspace.ResourceGroupName ` - -WorkspaceName $workspace.Name - if ($?) { - Add-LogMessage -Level Success "Logging activated for '$($facility.Key)' syslog facility [$($facility.Value)]." - } else { - Add-LogMessage -Level Fatal "Failed to activate logging for '$($facility.Key)' syslog facility [$($facility.Value)]!" - } -} - - -# Ensure required Windows event logs are collected -# ------------------------------------------------ -Add-LogMessage -Level Info "Ensuring required Windows event logs are being collected...'" -# Delete all existing event log sources -$sources = Get-AzOperationalInsightsDataSource -ResourceGroupName $workspace.ResourceGroupName -WorkspaceName $workspace.Name -Kind 'WindowsEvent' -foreach ($source in $sources) { - $null = Remove-AzOperationalInsightsDataSource -ResourceGroupName $workspace.ResourceGroupName -WorkspaceName $workspace.Name -Name $source.Name -Force -} -$eventLogNames = @( - "Active Directory Web Services" - "Directory Service", - "DFS Replication", - "DNS Server", - "Microsoft-Windows-Security-Netlogon/Operational", - "Microsoft-Windows-Winlogon/Operational", - "System" -) -foreach ($eventLogName in $eventLogNames) { - $sourceName = "windows-event-$eventLogName".Replace("%", "percent").Replace("/", "-per-").Replace(" ", "-").ToLower() - $null = New-AzOperationalInsightsWindowsEventDataSource -CollectErrors ` - -CollectInformation ` - -CollectWarnings ` - -EventLogName $eventLogName ` - -Name $sourceName ` - -ResourceGroupName $workspace.ResourceGroupName ` - -WorkspaceName $workspace.Name - if ($?) { - Add-LogMessage -Level Success "Logging activated for '$eventLogName'." - } else { - Add-LogMessage -Level Fatal "Failed to activate logging for '$eventLogName'!" - } -} - - -# Ensure require Windows performance counters are collected -# --------------------------------------------------------- -Add-LogMessage -Level Info "Ensuring required Windows performance counters are being collected...'" -# Delete all existing performance counter log sources -$sources = Get-AzOperationalInsightsDataSource -ResourceGroupName $workspace.ResourceGroupName -WorkspaceName $workspace.Name -Kind 'WindowsPerformanceCounter' -foreach ($source in $sources) { - $null = Remove-AzOperationalInsightsDataSource -ResourceGroupName $workspace.ResourceGroupName -WorkspaceName $workspace.Name -Name $source.Name -Force -} -$counters = @( - @{setName = "LogicalDisk"; counterName = "Avg. Disk sec/Read" }, - @{setName = "LogicalDisk"; counterName = "Avg. 
Disk sec/Write" }, - @{setName = "LogicalDisk"; counterName = "Current Disk Queue Length" }, - @{setName = "LogicalDisk"; counterName = "Disk Reads/sec" }, - @{setName = "LogicalDisk"; counterName = "Disk Transfers/sec" }, - @{setName = "LogicalDisk"; counterName = "Disk Writes/sec" }, - @{setName = "LogicalDisk"; counterName = "Free Megabytes" }, - @{setName = "Memory"; counterName = "Available MBytes" }, - @{setName = "Memory"; counterName = "% Committed Bytes In Use" }, - @{setName = "LogicalDisk"; counterName = "% Free Space" }, - @{setName = "Processor"; counterName = "% Processor Time" }, - @{setName = "System"; counterName = "Processor Queue Length" } -) -foreach ($counter in $counters) { - $sourceName = "windows-counter-$($counter.setName)-$($counter.counterName)".Replace("%", "percent").Replace("/", "-per-").Replace(" ", "-").ToLower() - $null = New-AzOperationalInsightsWindowsPerformanceCounterDataSource -CounterName $counter.counterName ` - -InstanceName "*" ` - -IntervalSeconds 60 ` - -Name $sourceName ` - -ObjectName $counter.setName ` - -ResourceGroupName $workspace.ResourceGroupName ` - -WorkspaceName $workspace.Name - if ($?) { - Add-LogMessage -Level Success "Logging activated for '$($counter.setName)/$($counter.counterName)'." - } else { - Add-LogMessage -Level Fatal "Failed to activate logging for '$($counter.setName)/$($counter.counterName)'!" - } -} - - -# Activate required Intelligence Packs -# ------------------------------------ -Add-LogMessage -Level Info "Ensuring required Log Analytics Intelligence Packs are enabled...'" -$packNames = @( - "AgentHealthAssessment", - "AzureActivity", - "AzureNetworking", - "AzureResources", - "AntiMalware", - "AzureAutomation", - "CapacityPerformance", - "ChangeTracking", - "DnsAnalytics", - "InternalWindowsEvent", - "LogManagement", - "NetFlow", - "NetworkMonitoring", - "ServiceMap", - "Updates", - "VMInsights", - "WindowsDefenderATP", - "WindowsFirewall", - "WinLog" -) -# Ensure only the selected intelligence packs are enabled -$packsAvailable = Get-AzOperationalInsightsIntelligencePack -ResourceGroupName $workspace.ResourceGroupName -WorkspaceName $workspace.Name -foreach ($pack in $packsAvailable) { - if ($pack.Name -in $packNames) { - if ($pack.Enabled) { - Add-LogMessage -Level InfoSuccess "'$($pack.Name)' Intelligence Pack already enabled." - } else { - $null = Set-AzOperationalInsightsIntelligencePack -IntelligencePackName $pack.Name -WorkspaceName $workspace.Name -ResourceGroupName $workspace.ResourceGroupName -Enabled $true - if ($?) { - Add-LogMessage -Level Success "'$($pack.Name)' Intelligence Pack enabled." - } else { - Add-LogMessage -Level Fatal "Failed to enable '$($pack.Name)' Intelligence Pack!" - } - } - } else { - if ($pack.Enabled) { - $null = Set-AzOperationalInsightsIntelligencePack -IntelligencePackName $pack.Name -WorkspaceName $workspace.Name -ResourceGroupName $workspace.ResourceGroupName -Enabled $false - if ($?) { - Add-LogMessage -Level Success "'$($pack.Name)' Intelligence Pack disabled." - } else { - Add-LogMessage -Level Fatal "Failed to disable '$($pack.Name)' Intelligence Pack!" - } - } else { - Add-LogMessage -Level InfoSuccess "'$($pack.Name)' Intelligence Pack already disabled." 
- } - } -} - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/safe_haven_management_environment/setup/Setup_SHM_Networking.ps1 b/deployment/safe_haven_management_environment/setup/Setup_SHM_Networking.ps1 deleted file mode 100644 index 297bfe527d..0000000000 --- a/deployment/safe_haven_management_environment/setup/Setup_SHM_Networking.ps1 +++ /dev/null @@ -1,112 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureKeyVault -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureNetwork -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureResources -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Cryptography -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Templates -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-ShmConfig -shmId $shmId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.subscriptionName -ErrorAction Stop - - -# Create VNet resource group if it does not exist -# ----------------------------------------------- -$null = Deploy-ResourceGroup -Name $config.network.vnet.rg -Location $config.location - - -# Create main virtual network and subnets -# --------------------------------------- -$vnet = Deploy-VirtualNetwork -Name $config.network.vnet.name -ResourceGroupName $config.network.vnet.rg -AddressPrefix $config.network.vnet.cidr -Location $config.location -DnsServer $config.dc.ip, $config.dcb.ip -$null = Deploy-Subnet -Name $config.network.vnet.subnets.firewall.name -AddressPrefix $config.network.vnet.subnets.firewall.cidr -VirtualNetwork $vnet -$gatewaySubnet = Deploy-Subnet -Name $config.network.vnet.subnets.gateway.name -AddressPrefix $config.network.vnet.subnets.gateway.cidr -VirtualNetwork $vnet -$identitySubnet = Deploy-Subnet -Name $config.network.vnet.subnets.identity.name -AddressPrefix $config.network.vnet.subnets.identity.cidr -VirtualNetwork $vnet -$monitoringSubnet = Deploy-Subnet -Name $config.network.vnet.subnets.monitoring.name -AddressPrefix $config.network.vnet.subnets.monitoring.cidr -VirtualNetwork $vnet -$updateServersSubnet = Deploy-Subnet -Name $config.network.vnet.subnets.updateServers.name -AddressPrefix $config.network.vnet.subnets.updateServers.cidr -VirtualNetwork $vnet - - -# Create package repository virtual networks and subnets -# ------------------------------------------------------ -# Tier 2 -$vnetRepositoriesTier2 = Deploy-VirtualNetwork -Name $config.network.vnetRepositoriesTier2.name -ResourceGroupName $config.network.vnetRepositoriesTier2.rg -AddressPrefix $config.network.vnetRepositoriesTier2.cidr -Location $config.location -$mirrorsExternalTier2Subnet = Deploy-Subnet -Name $config.network.vnetRepositoriesTier2.subnets.mirrorsExternal.name -AddressPrefix $config.network.vnetRepositoriesTier2.subnets.mirrorsExternal.cidr -VirtualNetwork $vnetRepositoriesTier2 -$mirrorsInternalTier2Subnet = Deploy-Subnet -Name $config.network.vnetRepositoriesTier2.subnets.mirrorsInternal.name -AddressPrefix 
$config.network.vnetRepositoriesTier2.subnets.mirrorsInternal.cidr -VirtualNetwork $vnetRepositoriesTier2 -$proxiesTier2Subnet = Deploy-Subnet -Name $config.network.vnetRepositoriesTier2.subnets.proxies.name -AddressPrefix $config.network.vnetRepositoriesTier2.subnets.proxies.cidr -VirtualNetwork $vnetRepositoriesTier2 -$deploymentTier2Subnet = Deploy-Subnet -Name $config.network.vnetRepositoriesTier2.subnets.deployment.name -AddressPrefix $config.network.vnetRepositoriesTier2.subnets.deployment.cidr -VirtualNetwork $vnetRepositoriesTier2 -# Tier 3 -$vnetRepositoriesTier3 = Deploy-VirtualNetwork -Name $config.network.vnetRepositoriesTier3.name -ResourceGroupName $config.network.vnetRepositoriesTier3.rg -AddressPrefix $config.network.vnetRepositoriesTier3.cidr -Location $config.location -$mirrorsExternalTier3Subnet = Deploy-Subnet -Name $config.network.vnetRepositoriesTier3.subnets.mirrorsExternal.name -AddressPrefix $config.network.vnetRepositoriesTier3.subnets.mirrorsExternal.cidr -VirtualNetwork $vnetRepositoriesTier3 -$mirrorsInternalTier3Subnet = Deploy-Subnet -Name $config.network.vnetRepositoriesTier3.subnets.mirrorsInternal.name -AddressPrefix $config.network.vnetRepositoriesTier3.subnets.mirrorsInternal.cidr -VirtualNetwork $vnetRepositoriesTier3 -$proxiesTier3Subnet = Deploy-Subnet -Name $config.network.vnetRepositoriesTier3.subnets.proxies.name -AddressPrefix $config.network.vnetRepositoriesTier3.subnets.proxies.cidr -VirtualNetwork $vnetRepositoriesTier3 -$deploymentTier3Subnet = Deploy-Subnet -Name $config.network.vnetRepositoriesTier3.subnets.deployment.name -AddressPrefix $config.network.vnetRepositoriesTier3.subnets.deployment.cidr -VirtualNetwork $vnetRepositoriesTier3 -# As we do not currently support Tier 4 we do not deploy any networks for it - - -# Ensure that NSGs exist with the correct rules and attach them to the correct subnet -# ----------------------------------------------------------------------------------- -# Identity -$identityNsg = Deploy-NetworkSecurityGroup -Name $config.network.vnet.subnets.identity.nsg.name -ResourceGroupName $config.network.vnet.rg -Location $config.location -$null = Set-NetworkSecurityGroupRules -NetworkSecurityGroup $identityNsg -Rules (Get-JsonFromMustacheTemplate -TemplatePath (Join-Path $PSScriptRoot ".." "network_rules" $config.network.vnet.subnets.identity.nsg.rules) -Parameters $config -AsHashtable) -$identitySubnet = Set-SubnetNetworkSecurityGroup -Subnet $identitySubnet -NetworkSecurityGroup $identityNsg -# Monitoring -$monitoringNsg = Deploy-NetworkSecurityGroup -Name $config.network.vnet.subnets.monitoring.nsg.name -ResourceGroupName $config.network.vnet.rg -Location $config.location -$null = Set-NetworkSecurityGroupRules -NetworkSecurityGroup $monitoringNsg -Rules (Get-JsonFromMustacheTemplate -TemplatePath (Join-Path $PSScriptRoot ".." "network_rules" $config.network.vnet.subnets.monitoring.nsg.rules) -Parameters $config -AsHashtable) -$monitoringSubnet = Set-SubnetNetworkSecurityGroup -Subnet $monitoringSubnet -NetworkSecurityGroup $monitoringNsg -# Update servers -$updateServersNsg = Deploy-NetworkSecurityGroup -Name $config.network.vnet.subnets.updateServers.nsg.name -ResourceGroupName $config.network.vnet.rg -Location $config.location -$null = Set-NetworkSecurityGroupRules -NetworkSecurityGroup $updateServersNsg -Rules (Get-JsonFromMustacheTemplate -TemplatePath (Join-Path $PSScriptRoot ".." 
"network_rules" $config.network.vnet.subnets.updateServers.nsg.rules) -Parameters $config -AsHashtable) -$updateServersSubnet = Set-SubnetNetworkSecurityGroup -Subnet $updateServersSubnet -NetworkSecurityGroup $updateServersNsg -# Tier 2 external mirrors -$mirrorsExternalTier2Nsg = Deploy-NetworkSecurityGroup -Name $config.network.vnetRepositoriesTier2.subnets.mirrorsExternal.nsg.name -ResourceGroupName $config.network.vnetRepositoriesTier2.rg -Location $config.location -$null = Set-NetworkSecurityGroupRules -NetworkSecurityGroup $mirrorsExternalTier2Nsg -Rules (Get-JsonFromMustacheTemplate -TemplatePath (Join-Path $PSScriptRoot ".." "network_rules" $config.network.vnetRepositoriesTier2.subnets.mirrorsExternal.nsg.rules) -Parameters $config -AsHashtable) -$mirrorsExternalTier2Subnet = Set-SubnetNetworkSecurityGroup -Subnet $mirrorsExternalTier2Subnet -NetworkSecurityGroup $mirrorsExternalTier2Nsg -# Tier 2 internal mirrors -$mirrorsInternalTier2Nsg = Deploy-NetworkSecurityGroup -Name $config.network.vnetRepositoriesTier2.subnets.mirrorsInternal.nsg.name -ResourceGroupName $config.network.vnetRepositoriesTier2.rg -Location $config.location -$null = Set-NetworkSecurityGroupRules -NetworkSecurityGroup $mirrorsInternalTier2Nsg -Rules (Get-JsonFromMustacheTemplate -TemplatePath (Join-Path $PSScriptRoot ".." "network_rules" $config.network.vnetRepositoriesTier2.subnets.mirrorsInternal.nsg.rules) -Parameters $config -AsHashtable) -$mirrorsInternalTier2Subnet = Set-SubnetNetworkSecurityGroup -Subnet $mirrorsInternalTier2Subnet -NetworkSecurityGroup $mirrorsInternalTier2Nsg -# Tier 2 proxies -$proxiesTier2Nsg = Deploy-NetworkSecurityGroup -Name $config.network.vnetRepositoriesTier2.subnets.proxies.nsg.name -ResourceGroupName $config.network.vnetRepositoriesTier2.rg -Location $config.location -$null = Set-NetworkSecurityGroupRules -NetworkSecurityGroup $proxiesTier2Nsg -Rules (Get-JsonFromMustacheTemplate -TemplatePath (Join-Path $PSScriptRoot ".." "network_rules" $config.network.vnetRepositoriesTier2.subnets.proxies.nsg.rules) -Parameters $config -AsHashtable) -$proxiesTier2Subnet = Set-SubnetNetworkSecurityGroup -Subnet $proxiesTier2Subnet -NetworkSecurityGroup $proxiesTier2Nsg -# Tier 2 deployment -$deploymentTier2Nsg = Deploy-NetworkSecurityGroup -Name $config.network.vnetRepositoriesTier2.subnets.deployment.nsg.name -ResourceGroupName $config.network.vnetRepositoriesTier2.rg -Location $config.location -$null = Set-NetworkSecurityGroupRules -NetworkSecurityGroup $deploymentTier2Nsg -Rules (Get-JsonFromMustacheTemplate -TemplatePath (Join-Path $PSScriptRoot ".." "network_rules" $config.network.vnetRepositoriesTier2.subnets.deployment.nsg.rules) -Parameters $config -AsHashtable) -$deploymentTier2Subnet = Set-SubnetNetworkSecurityGroup -Subnet $deploymentTier2Subnet -NetworkSecurityGroup $deploymentTier2Nsg -# Tier 3 external mirrors -$mirrorsExternalTier3Nsg = Deploy-NetworkSecurityGroup -Name $config.network.vnetRepositoriesTier3.subnets.mirrorsExternal.nsg.name -ResourceGroupName $config.network.vnetRepositoriesTier3.rg -Location $config.location -$null = Set-NetworkSecurityGroupRules -NetworkSecurityGroup $mirrorsExternalTier3Nsg -Rules (Get-JsonFromMustacheTemplate -TemplatePath (Join-Path $PSScriptRoot ".." 
"network_rules" $config.network.vnetRepositoriesTier3.subnets.mirrorsExternal.nsg.rules) -Parameters $config -AsHashtable) -$mirrorsExternalTier3Subnet = Set-SubnetNetworkSecurityGroup -Subnet $mirrorsExternalTier3Subnet -NetworkSecurityGroup $mirrorsExternalTier3Nsg -# Tier 3 internal mirrors -$mirrorsInternalTier3Nsg = Deploy-NetworkSecurityGroup -Name $config.network.vnetRepositoriesTier3.subnets.mirrorsInternal.nsg.name -ResourceGroupName $config.network.vnetRepositoriesTier3.rg -Location $config.location -$null = Set-NetworkSecurityGroupRules -NetworkSecurityGroup $mirrorsInternalTier3Nsg -Rules (Get-JsonFromMustacheTemplate -TemplatePath (Join-Path $PSScriptRoot ".." "network_rules" $config.network.vnetRepositoriesTier3.subnets.mirrorsInternal.nsg.rules) -Parameters $config -AsHashtable) -$mirrorsInternalTier3Subnet = Set-SubnetNetworkSecurityGroup -Subnet $mirrorsInternalTier3Subnet -NetworkSecurityGroup $mirrorsInternalTier3Nsg -# Tier 3 proxies -$proxiesTier3Nsg = Deploy-NetworkSecurityGroup -Name $config.network.vnetRepositoriesTier3.subnets.proxies.nsg.name -ResourceGroupName $config.network.vnetRepositoriesTier3.rg -Location $config.location -$null = Set-NetworkSecurityGroupRules -NetworkSecurityGroup $proxiesTier3Nsg -Rules (Get-JsonFromMustacheTemplate -TemplatePath (Join-Path $PSScriptRoot ".." "network_rules" $config.network.vnetRepositoriesTier3.subnets.proxies.nsg.rules) -Parameters $config -AsHashtable) -$proxiesTier3Subnet = Set-SubnetNetworkSecurityGroup -Subnet $proxiesTier3Subnet -NetworkSecurityGroup $proxiesTier3Nsg -# Tier 3 deployment -$deploymentTier3Nsg = Deploy-NetworkSecurityGroup -Name $config.network.vnetRepositoriesTier3.subnets.deployment.nsg.name -ResourceGroupName $config.network.vnetRepositoriesTier3.rg -Location $config.location -$null = Set-NetworkSecurityGroupRules -NetworkSecurityGroup $deploymentTier3Nsg -Rules (Get-JsonFromMustacheTemplate -TemplatePath (Join-Path $PSScriptRoot ".." "network_rules" $config.network.vnetRepositoriesTier3.subnets.deployment.nsg.rules) -Parameters $config -AsHashtable) -$deploymentTier3Subnet = Set-SubnetNetworkSecurityGroup -Subnet $deploymentTier3Subnet -NetworkSecurityGroup $deploymentTier3Nsg - - -# Create the VPN gateway -# ---------------------- -$publicIp = Deploy-PublicIpAddress -Name "$($config.network.vnet.name)_GW_PIP" -ResourceGroupName $config.network.vnet.rg -AllocationMethod Static -Location $config.location -Sku Standard -$certificate = Resolve-KeyVaultSecret -VaultName $config.keyVault.name -SecretName $config.keyVault.secretNames.vpnCaCertificatePlain -AsPlaintext -$null = Deploy-VirtualNetworkGateway -Name "$($config.network.vnet.name)_GW" -ResourceGroupName $config.network.vnet.rg -Location $config.location -PublicIpAddressId $publicIp.Id -SubnetId $gatewaySubnet.Id -P2SCertificate $certificate -VpnClientAddressPool $config.network.vpn.cidr - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/safe_haven_management_environment/setup/Setup_SHM_Package_Repositories.ps1 b/deployment/safe_haven_management_environment/setup/Setup_SHM_Package_Repositories.ps1 deleted file mode 100644 index e13138f7ee..0000000000 --- a/deployment/safe_haven_management_environment/setup/Setup_SHM_Package_Repositories.ps1 +++ /dev/null @@ -1,205 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 
'project')")] - [string]$shmId -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.Network -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureCompute -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureKeyVault -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureNetwork -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureResources -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureStorage -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Templates -Force -ErrorAction Stop - - -# Resolve the cloud init file, applying an allowlist if needed -# ----------------------------------------------------------- -function Resolve-CloudInit { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of cloud-init template file")] - [string]$CloudInitTemplateName, - [Parameter(Mandatory = $false, HelpMessage = "Hashtable containing template parameters")] - [System.Collections.IDictionary]$TemplateParameters - ) - try { - # Load template cloud-init file - $CloudInitBasePath = Join-Path $PSScriptRoot ".." "cloud_init" -Resolve - $CloudInitPath = Join-Path $CloudInitBasePath $CloudInitTemplateName -Resolve - $CloudInitTemplate = Get-Content $CloudInitPath -Raw -ErrorAction Stop - # Expand the template - $CloudInitTemplate = Expand-CloudInitResources -Template $CloudInitTemplate -ResourcePath (Join-Path $CloudInitBasePath "resources") - $CloudInitTemplate = Expand-CloudInitResources -Template $CloudInitTemplate -ResourcePath (Join-Path ".." ".." "common" "resources") - $CloudInitTemplate = Expand-CloudInitResources -Template $CloudInitTemplate -ResourcePath (Join-Path ".." ".." ".." "environment_configs" "package_lists") - $CloudInitTemplate = Expand-MustacheTemplate -Template $CloudInitTemplate -Parameters $TemplateParameters - return $CloudInitTemplate - } catch { - Add-LogMessage -Level Fatal "Failed to load cloud init file '$CloudInitPath'!" 
-Exception $_.Exception - } -} - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-ShmConfig -shmId $shmId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.subscriptionName -ErrorAction Stop - - - -# Ensure that repository resource group exists -# -------------------------------------------- -$null = Deploy-ResourceGroup -Name $config.repositories.rg -Location $config.location - - -# Get common objects -# ------------------ -$bootDiagnosticsAccount = Deploy-StorageAccount -Name $config.storage.bootdiagnostics.accountName -ResourceGroupName $config.storage.bootdiagnostics.rg -Location $config.location -$vmAdminUsername = Resolve-KeyVaultSecret -VaultName $config.keyVault.name -SecretName $config.keyVault.secretNames.vmAdminUsername -DefaultValue "shm$($config.id)admin".ToLower() -AsPlaintext - - -# Iterate over tiers deploying requested package repositories in each case -# ------------------------------------------------------------------------ -foreach ($tier in @("tier2", "tier3")) { - # Get the virtual network for this tier and peer it to the SHM VNet - $vnetConfig = $config.network["vnetRepositories${tier}"] - $vnetRepository = Get-VirtualNetwork -Name $vnetConfig.name -ResourceGroupName $vnetConfig.rg - Set-VnetPeering -Vnet1Name $vnetRepository.Name ` - -Vnet1ResourceGroupName $vnetRepository.ResourceGroupName ` - -Vnet1SubscriptionName $config.subscriptionName ` - -Vnet2Name $config.network.vnet.name ` - -Vnet2ResourceGroupName $config.network.vnet.rg ` - -Vnet2SubscriptionName $config.subscriptionName - $deploymentSubnet = Get-Subnet -Name $vnetConfig.subnets.deployment.name -VirtualNetworkName $vnetRepository.Name -ResourceGroupName $config.network.vnet.rg - - # Deploy proxy servers if requested - if ($config.repositories[$tier].proxies) { - Add-LogMessage -Level Info "Deploying $tier package proxy server" - $proxiesSubnet = Get-Subnet -Name $vnetConfig.subnets.proxies.name -VirtualNetworkName $vnetConfig.name -ResourceGroupName $vnetConfig.rg - $vmConfig = $config.repositories[$tier].proxies.many - # Construct the cloud-init file - $config["perInstance"] = @{ - nexusAdminPassword = (Resolve-KeyVaultSecret -VaultName $config.keyVault.name -SecretName $vmConfig.applicationAdminPasswordSecretName -DefaultLength 20 -AsPlaintext) - tier = [int]$tier.Replace("tier", "") - } - $cloudInitFileName = "cloud-init-repository-proxy.mustache.yaml".ToLower() - $CloudInitYaml = Resolve-CloudInit -CloudInitTemplateName $cloudInitFileName ` - -TemplateParameters $config - # Deploy the VM - $deploymentIpAddress = Get-NextAvailableIpInRange -IpRangeCidr $vnetConfig.subnets.deployment.cidr -VirtualNetwork $vnetRepository - $params = @{ - AdminPassword = (Resolve-KeyVaultSecret -VaultName $config.keyVault.name -SecretName $vmConfig.adminPasswordSecretName -DefaultLength 20) - AdminUsername = $vmAdminUsername - BootDiagnosticsAccount = $bootDiagnosticsAccount - CloudInitYaml = $CloudInitYaml - ImageSku = "Ubuntu-latest" - Location = $config.location - Name = $vmConfig.vmName - OsDiskSizeGb = $vmConfig.disks.os.sizeGb - OsDiskType = $vmConfig.disks.os.type - PrivateIpAddress = $deploymentIpAddress - ResourceGroupName = $config.repositories.rg - Size = $vmConfig.vmSize - Subnet = $deploymentSubnet - } - $null = Deploy-LinuxVirtualMachine @params | Update-VMIpAddress -Subnet $proxiesSubnet -IpAddress $vmConfig.ipAddress - } - - # Deploy external mirrors if requested (this must come 
before the internal ones are deployed) - if ($config.repositories[$tier].mirrorsExternal) { - Add-LogMessage -Level Info "Deploying $tier external package mirrors" - $mirrorsExternalSubnet = Get-Subnet -Name $vnetConfig.subnets.mirrorsExternal.name -VirtualNetworkName $vnetConfig.name -ResourceGroupName $vnetConfig.rg - foreach ($SourceRepositoryName in $config.repositories[$tier].mirrorsExternal.Keys) { - $vmConfig = $config.repositories[$tier].mirrorsExternal[$SourceRepositoryName] - # Construct the cloud-init file - $cloudInitFileName = "cloud-init-repository-mirror-external-${SourceRepositoryName}.mustache.yaml".ToLower() - $config["perInstance"] = @{ - tier = $Tier - } - $CloudInitYaml = Resolve-CloudInit -CloudInitTemplateName $cloudInitFileName ` - -TemplateParameters $config - # Deploy the data disk - $dataDisk = Deploy-ManagedDisk -Name "$($vmConfig.vmName)-DATA-DISK" -SizeGB $vmConfig.disks.data.sizeGb -Type $vmConfig.disks.data.type -ResourceGroupName $config.repositories.rg -Location $config.location - # Deploy the VM - $deploymentIpAddress = Get-NextAvailableIpInRange -IpRangeCidr $vnetConfig.subnets.deployment.cidr -VirtualNetwork $vnetRepository - $params = @{ - AdminPassword = (Resolve-KeyVaultSecret -VaultName $config.keyVault.name -SecretName $vmConfig.adminPasswordSecretName -DefaultLength 20) - AdminUsername = $vmAdminUsername - BootDiagnosticsAccount = $bootDiagnosticsAccount - CloudInitYaml = $CloudInitYaml - DataDiskIds = @($dataDisk.Id) - ImageSku = "Ubuntu-latest" - Location = $config.location - Name = $vmConfig.vmName - OsDiskSizeGb = $vmConfig.disks.os.sizeGb - OsDiskType = $vmConfig.disks.os.type - PrivateIpAddress = $deploymentIpAddress - ResourceGroupName = $config.repositories.rg - Size = $vmConfig.vmSize - Subnet = $deploymentSubnet - } - $null = Deploy-LinuxVirtualMachine @params | Update-VMIpAddress -Subnet $mirrorsExternalSubnet -IpAddress $vmConfig.ipAddress - # Extract the public key and save it for later use - try { - Add-LogMessage -Level Info "Extracting public SSH key to allow connections to internal mirrors" - $result = Invoke-RemoteScript -VMName $vmConfig.vmName -ResourceGroupName $config.repositories.rg -Shell "UnixShell" -Script "cat /home/mirrordaemon/.ssh/id_rsa.pub | grep '^ssh'" -SuppressOutput - $PublicKey = [string]($result.Value[0].Message -Split "`n" | Select-String "^ssh") - $config.repositories[$tier].mirrorsInternal[$SourceRepositoryName]["externalMirrorPublicKey"] = $PublicKey - Add-LogMessage -Level Success "Extracting public SSH key succeeded" - } catch { - Add-LogMessage -Level Fatal "Could not extract SSH public key" -Exception $_.Exception - } - } - } - - # Deploy internal mirrors if requested (this must come after the external ones are deployed) - if ($config.repositories[$tier].mirrorsInternal) { - Add-LogMessage -Level Info "Deploying $tier internal package mirrors" - $mirrorsInternalSubnet = Get-Subnet -Name $vnetConfig.subnets.mirrorsInternal.name -VirtualNetworkName $vnetConfig.name -ResourceGroupName $vnetConfig.rg - foreach ($SourceRepositoryName in $config.repositories[$tier].mirrorsInternal.Keys) { - $vmConfig = $config.repositories[$tier].mirrorsInternal[$SourceRepositoryName] - # Construct the cloud-init file - $config["perInstance"] = @{ - externalMirrorPublicKey = $vmConfig["externalMirrorPublicKey"] - tier = $Tier - } - $cloudInitFileName = "cloud-init-repository-mirror-internal-${SourceRepositoryName}.mustache.yaml".ToLower() - $CloudInitYaml = Resolve-CloudInit -CloudInitTemplateName $cloudInitFileName ` - 
-TemplateParameters $config - # Deploy the data disk - $dataDisk = Deploy-ManagedDisk -Name "$($vmConfig.vmName)-DATA-DISK" -SizeGB $vmConfig.disks.data.sizeGb -Type $vmConfig.disks.data.type -ResourceGroupName $config.repositories.rg -Location $config.location - # Deploy the VM - $deploymentIpAddress = Get-NextAvailableIpInRange -IpRangeCidr $vnetConfig.subnets.deployment.cidr -VirtualNetwork $vnetRepository - $params = @{ - AdminPassword = (Resolve-KeyVaultSecret -VaultName $config.keyVault.name -SecretName $vmConfig.adminPasswordSecretName -DefaultLength 20) - AdminUsername = $vmAdminUsername - BootDiagnosticsAccount = $bootDiagnosticsAccount - CloudInitYaml = $CloudInitYaml - DataDiskIds = @($dataDisk.Id) - ImageSku = "Ubuntu-latest" - Location = $config.location - Name = $vmConfig.vmName - OsDiskSizeGb = $vmConfig.disks.os.sizeGb - OsDiskType = $vmConfig.disks.os.type - PrivateIpAddress = $deploymentIpAddress - ResourceGroupName = $config.repositories.rg - Size = $vmConfig.vmSize - Subnet = $deploymentSubnet - } - $null = Deploy-LinuxVirtualMachine @params | Update-VMIpAddress -Subnet $mirrorsInternalSubnet -IpAddress $vmConfig.ipAddress - # Ensure that the fingerprint for this VM is registered with the corresponding external mirror - Add-LogMessage -Level Info "Retrieving fingerprint for '$($vmConfig.vmName)'..." - $result = Invoke-RemoteScript -VMName $vmConfig.vmName -ResourceGroupName $config.repositories.rg -Shell "UnixShell" -Script "ssh-keyscan 127.0.0.1 2> /dev/null" - $internalFingerprint = $result.Value[0].Message -Split "`n" | Select-String "^127.0.0.1" | ForEach-Object { $_ -replace "127.0.0.1", $vmConfig.ipAddress } - $externalVmName = $vmConfig.vmName.Replace("INTERNAL", "EXTERNAL") - Add-LogMessage -Level Info "Registering fingerprint and IP address for '$($vmConfig.vmName)' with '$externalVmName'..." - $null = Invoke-RemoteScript -VMName $externalVmName -ResourceGroupName $config.repositories.rg -Shell "UnixShell" -Script "/home/mirrordaemon/update_known_internal_mirrors.sh '$internalFingerprint' '$($vmConfig.ipAddress)'" - } - } -} - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/safe_haven_management_environment/setup/Setup_SHM_Register_VMs.ps1 b/deployment/safe_haven_management_environment/setup/Setup_SHM_Register_VMs.ps1 deleted file mode 100644 index 63ede0c165..0000000000 --- a/deployment/safe_haven_management_environment/setup/Setup_SHM_Register_VMs.ps1 +++ /dev/null @@ -1,49 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 
'project')")] - [string]$shmId -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.OperationalInsights -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureCompute -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureOperationalInsights -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureResources -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-ShmConfig -shmId $shmId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.subscriptionName -ErrorAction Stop - - -# Create resource group if it does not exist -# ------------------------------------------ -$null = Deploy-ResourceGroup -Name $config.monitoring.rg -Location $config.location - - -# Deploy log analytics workspace -# ------------------------------ -$workspace = Deploy-LogAnalyticsWorkspace -Name $config.monitoring.loggingWorkspace.name -ResourceGroupName $config.monitoring.rg -Location $config.location -$workspaceKey = Get-AzOperationalInsightsWorkspaceSharedKey -Name $workspace.Name -ResourceGroup $workspace.ResourceGroupName - - -# Ensure all SHM VMs are registered with the logging workspace -# --------------------------------------------------------------- -Add-LogMessage -Level Info "[ ] Ensuring logging agent is installed on all SHM VMs..." -$shmResourceGroups = Get-ShmResourceGroups -shmConfig $config -try { - $null = $shmResourceGroups | ForEach-Object { Get-AzVM -ResourceGroup $_.ResourceGroupName } | ForEach-Object { - Deploy-VirtualMachineMonitoringExtension -VM $_ -WorkspaceId $workspace.CustomerId -WorkspaceKey $workspaceKey.PrimarySharedKey - } - Add-LogMessage -Level Success "Ensured that logging agent is installed on all SHM VMs." -} catch { - Add-LogMessage -Level Fatal "Failed to ensure that logging agent is installed on all SHM VMs!" -Exception $_.Exception -} - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/safe_haven_management_environment/setup/Setup_SHM_Storage_Accounts.ps1 b/deployment/safe_haven_management_environment/setup/Setup_SHM_Storage_Accounts.ps1 deleted file mode 100644 index dd2708548b..0000000000 --- a/deployment/safe_haven_management_environment/setup/Setup_SHM_Storage_Accounts.ps1 +++ /dev/null @@ -1,40 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 
'project')")] - [string]$shmId -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureResources -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureStorage -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-ShmConfig -shmId $shmId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.subscriptionName -ErrorAction Stop - -# Ensure that boot diagnostics resource group and storage account exist -# --------------------------------------------------------------------- -$null = Deploy-ResourceGroup -Name $config.storage.bootdiagnostics.rg -Location $config.location -$null = Deploy-StorageAccount -Name $config.storage.bootdiagnostics.accountName -ResourceGroupName $config.storage.bootdiagnostics.rg -Location $config.location - - -# Ensure that artifacts resource group and storage account exist -# -------------------------------------------------------------- -$null = Deploy-ResourceGroup -Name $config.storage.artifacts.rg -Location $config.location -$storageAccount = Deploy-StorageAccount -Name $config.storage.artifacts.accountName -ResourceGroupName $config.storage.artifacts.rg -Location $config.location - - -# Create blob storage containers -# ------------------------------ -Add-LogMessage -Level Info "Ensuring that blob storage containers exist..." -foreach ($containerName in $config.storage.artifacts.containers.Values) { - $null = Deploy-StorageContainer -Name $containerName -StorageAccount $storageAccount - $null = Clear-StorageContainer -Name $containerName -StorageAccount $storageAccount -} - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/safe_haven_management_environment/setup/Setup_SHM_Update_Servers.ps1 b/deployment/safe_haven_management_environment/setup/Setup_SHM_Update_Servers.ps1 deleted file mode 100644 index 83039f8055..0000000000 --- a/deployment/safe_haven_management_environment/setup/Setup_SHM_Update_Servers.ps1 +++ /dev/null @@ -1,60 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 
'project')")] - [string]$shmId -) - -Import-Module Az.Accounts -Import-Module $PSScriptRoot/../../common/AzureCompute -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureKeyVault -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureNetwork -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureResources -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureStorage -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Templates -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-ShmConfig -shmId $shmId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.subscriptionName -ErrorAction Stop - - -# Ensure that boot diagnostics and monitoring resource groups exist -# ----------------------------------------------------------------- -$null = Deploy-ResourceGroup -Name $config.storage.bootdiagnostics.rg -Location $config.location -$null = Deploy-ResourceGroup -Name $config.monitoring.rg -Location $config.location - - -# Ensure that boot diagnostics storage account exists -# --------------------------------------------------- -$null = Deploy-StorageAccount -Name $config.storage.bootdiagnostics.accountName -ResourceGroupName $config.storage.bootdiagnostics.rg -Location $config.location - - -# Deploy the Linux update server -# ------------------------------ -$cloudInitYaml = Expand-CloudInitResources -Template (Join-Path $PSScriptRoot ".." "cloud_init" "cloud-init-update-server-linux.mustache.yaml") -ResourcePath (Join-Path ".." ".." "common" "resources") -$cloudInitYaml = Expand-MustacheTemplate -TemplatePath $cloudInitYaml -Parameters $config -$bootDiagnosticsAccount = Deploy-StorageAccount -Name $config.storage.bootdiagnostics.accountName -ResourceGroupName $config.storage.bootdiagnostics.rg -Location $config.location -$vmName = $config.monitoring.updateServers.linux.vmName -$params = @{ - AdminPassword = (Resolve-KeyVaultSecret -VaultName $config.keyVault.name -SecretName $config.monitoring.updateServers.linux.adminPasswordSecretName -DefaultLength 20) - AdminUsername = (Resolve-KeyVaultSecret -VaultName $config.keyVault.name -SecretName $config.keyVault.secretNames.vmAdminUsername -DefaultValue "shm$($config.id)admin".ToLower() -AsPlaintext) - BootDiagnosticsAccount = $bootDiagnosticsAccount - CloudInitYaml = $cloudInitYaml - ImageSku = "Ubuntu-latest" - Location = $config.location - Name = $vmName - OsDiskSizeGb = $config.monitoring.updateServers.linux.disks.os.sizeGb - OsDiskType = $config.monitoring.updateServers.linux.disks.os.type - PrivateIpAddress = $config.monitoring.updateServers.linux.ip - ResourceGroupName = $config.monitoring.rg - Size = $config.monitoring.updateServers.linux.vmSize - Subnet = (Get-Subnet -Name $config.network.vnet.subnets.updateServers.name -ResourceGroupName $config.network.vnet.rg -VirtualNetworkName $config.network.vnet.name) -} -Deploy-LinuxVirtualMachine @params | Start-VM - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/secure_research_desktop/cloud_init/cloud-init-buildimage-ubuntu-1804.mustache.yaml b/deployment/secure_research_desktop/cloud_init/cloud-init-buildimage-ubuntu-1804.mustache.yaml deleted file mode 100644 index 
fe79bd30e7..0000000000 --- a/deployment/secure_research_desktop/cloud_init/cloud-init-buildimage-ubuntu-1804.mustache.yaml +++ /dev/null @@ -1,409 +0,0 @@ -#cloud-config - -# Set locale and timezone -locale: en_GB.UTF-8 -timezone: Etc/UTC - -# Update package database on first boot -package_update: true - -# Upgrade installed packages on first boot -package_upgrade: true - -apt: - # Append to the existing /etc/apt/sources.list - preserve_sources_list: true - - # Add repositories - sources: - dbeaver.list: - source: "deb https://dbeaver.io/debs/dbeaver-ce /" - keyid: 98F5A7CC1ABE72AC3852A007D33A1BD725ED047D # DBeaver Co. (DBeaver Co. GPG key) - - microsoft-general.list: - source: "deb [arch=amd64] https://packages.microsoft.com/ubuntu/18.04/prod bionic main" - keyid: BC528686B50D79E339D3721CEB3E94ADBE1229CF # Microsoft (Release signing) - - microsoft-vscode.list: - source: "deb [arch=amd64] https://packages.microsoft.com/repos/vscode stable main" - keyid: BC528686B50D79E339D3721CEB3E94ADBE1229CF # Microsoft (Release signing) - - nvidia-cuda.list: - source: "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 /" - keyid: EB693B3035CD5710E231E123A4B469963BF863CC # cudatools - - nvidia-ml.list: - source: "deb http://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 /" - keyid: AE09FE4BBD223A84B2CCFCE3F60F4B3D7FA2AF80 # cudatools - - postgresql.list: - source: "deb http://apt.postgresql.org/pub/repos/apt/ bionic-pgdg main" - keyid: B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8 # PostgreSQL Debian Repository - - qgis.list: - source: "deb https://ubuntu.qgis.org/ubuntu bionic main" - keyid: 2D7E3441A707FDB3E7059441D155B8E6A419C5BE # QGIS Archive Automatic Signing Key (2022-2027) - - r-base-version.list: - source: "deb https://cloud.r-project.org/bin/linux/ubuntu bionic-cran40/" - keyid: E298A3A825C0D65DFD57CBB651716619E084DAB9 # Michael Rutter - -write_files: - - path: "/etc/jaas.conf" - permissions: "0444" - content: | - {{jaas.conf}} - - - path: "/opt/build/azuredatastudio.debinfo" - permissions: "0400" - content: | - {{deb-azuredatastudio.version}} - - - path: "/opt/build/check_installation.py" - permissions: "0400" - content: | - {{check_installation.py}} - - - path: "/opt/build/dbeaver_drivers_config.xml" - permissions: "0444" - content: | - {{dbeaver_drivers_config.mustache.xml}} - - - path: "/opt/build/deprovision_vm.sh" - permissions: "0500" - content: | - {{deprovision_vm.sh}} - - - path: "/opt/build/download_and_install_deb.sh" - permissions: "0500" - content: | - {{download_and_install_deb.sh}} - - - path: "/opt/build/packages/packages-julia.list" - permissions: "0444" - content: | - {{packages-julia.list}} - - - path: "/opt/build/packages/packages-r-bioconductor.list" - permissions: "0444" - content: | - {{packages-r-bioconductor.list}} - - - path: "/opt/build/packages/packages-r-cran.list" - permissions: "0444" - content: | - {{packages-r-cran.list}} - - - path: "/opt/build/packages/packages-python-system.txt" - permissions: "0444" - content: | - {{packages-python-system.txt}} - - - path: "/opt/build/pyenv/build_requirements.py" - permissions: "0500" - content: | - {{pyenv_build_requirements.py}} - - - path: "/opt/build/pyenv/install.sh" - permissions: "0500" - content: | - {{pyenv_install.sh}} - - - path: "/opt/build/pyenv/packages-python.yaml" - permissions: "0400" - content: | - {{packages-python.yaml}} - - - path: "/opt/build/rstudio.debinfo" - permissions: "0400" - content: | - {{deb-rstudio-focal.version}} - - - path: 
"/opt/monitoring/analyse_build.py" - permissions: "0755" - content: | - {{analyse_build.py}} - - - path: "/opt/monitoring/deprovision.log" - permissions: "0600" - content: | - # Deprovisioning log - - - path: "/usr/share/applications/pycharm.desktop" - permissions: "0644" - content: | - {{pycharm.desktop}} - -# Packages to install with snap -snap: - commands: - - snap install {{packages-snap.list}} - -# List of packages to install with apt-get -packages: - - {{packages-apt.list}} - -# Set the NTP server -# By default we use Google's NTP servers which are incompatible with other servers due to leap-second smearing -ntp: - enabled: true - pools: - {{#time.ntp.serverAddresses}} - - {{.}} - {{/time.ntp.serverAddresses}} - -# List of commands to run using `/bin/sh` -# When changing these lines try to ensure that everything is checked as close as possible to the point when it is installed/configured. -# If any of the installation/configuration checks fail then end the cloud-init process immediately by calling `exit 1`. -# This allows us to (i) not waste time completing a build once a step has failed and (ii) easily tell when and why a build failed using the analyse_build.py script. -runcmd: - - | - # Utility function to send a message to stderr then return an error code - die() { - echo "$1" >&2 - exit "${2-1}" - } - - sleep 10 # ensure that run commands are cleanly time-separated from other cloud-init commands - - echo ">=== $(date +%s) Preparing system for installation ===<" - # Allowing cloud.rproject.org would give unrestricted access to CRAN - # As we do not want this, we remove this repository before continuing deployment - - echo "Removing R updates repository..." - - rm /etc/apt/sources.list.d/r-base-version.list - - apt update - # Install python packages used in the rest of the build - - echo "Installing python packages..." - - pip3 install --upgrade -r /opt/build/packages/packages-python-system.txt - - pip3 list --format columns - # Log system performance during the installation and record it each minute - - echo "Setting up glances..." - - nohup glances --stdout-csv now,mem.used,mem.total,cpu.idle --time 30 > /opt/monitoring/performance_log.csv & - # Suppress apt prompts and warning messages - - echo "Suppressing apt prompts..." - - export DEBIAN_FRONTEND=noninteractive - # Add an entry for PATH to default bashrc - - echo "Adding PATH setting to default bashrc" - - echo '# Set path' >> /etc/bash.bashrc - - echo "export PATH=$PATH" >> /etc/bash.bashrc - - echo '' >> /etc/bash.bashrc - - - echo ">=== $(date +%s) Installing additional deb/snap/source packages ===<" - # Add snap to path - - echo "Adding snap to path..." - - PATH=$PATH:/snap/bin - # Microsoft ODBC tools - - echo "Installing Microsoft ODBC tools..." - - sleep 10 # wait for possible dpkg locks - - ACCEPT_EULA=Y apt-get install -y msodbcsql17 mssql-tools - - PATH=$PATH:/opt/mssql-tools/bin - - echo "export DOTNET_CLI_TELEMETRY_OPTOUT=1" >> /etc/bash.bashrc - - if [ "$(which sqlcmd)" = "" ]; then echo "Could not install Microsoft ODBC tools!"; exit 1; else echo "... successfully installed Microsoft ODBC tools"; fi - # Azure Data Studio - - echo "Installing Azure Data Studio..." - - /opt/build/download_and_install_deb.sh azuredatastudio - - if [ "$(which azuredatastudio)" = "" ]; then echo "Could not install Azure Data Studio!"; exit 1; else echo "... successfully installed Azure Data Studio"; fi - # Install drivers from maven - - echo "Installing DBeaver drivers..." 
- - DBEAVER_DRIVER_DIR="/usr/share/dbeaver-ce/drivers/maven/maven-central" - # Note that the filenames specified here have to be kept synchronised with the names in the dbeaver_drivers_config.xml file. - # Adding new drivers therefore involves changing both this file and the XML file. - - mkdir -p ${DBEAVER_DRIVER_DIR}/com.microsoft.sqlserver/ - - wget -nv https://repo1.maven.org/maven2/com/microsoft/sqlserver/mssql-jdbc/{{dbeaver.drivers.mssql_jdbc}}/mssql-jdbc-{{dbeaver.drivers.mssql_jdbc}}.jar -P ${DBEAVER_DRIVER_DIR}/com.microsoft.sqlserver/ - - wget -nv https://repo1.maven.org/maven2/com/microsoft/sqlserver/mssql-jdbc/{{dbeaver.drivers.mssql_jdbc}}/mssql-jdbc-{{dbeaver.drivers.mssql_jdbc}}.pom -P ${DBEAVER_DRIVER_DIR}/com.microsoft.sqlserver/ - - mkdir -p ${DBEAVER_DRIVER_DIR}/org.postgresql/ - - wget -nv https://repo1.maven.org/maven2/org/postgresql/pgjdbc-core-parent/{{dbeaver.drivers.pgjdbc}}/pgjdbc-core-parent-{{dbeaver.drivers.pgjdbc}}.pom -P ${DBEAVER_DRIVER_DIR}/org.postgresql/ - - wget -nv https://repo1.maven.org/maven2/org/postgresql/pgjdbc-versions/{{dbeaver.drivers.pgjdbc}}/pgjdbc-versions-{{dbeaver.drivers.pgjdbc}}.pom -P ${DBEAVER_DRIVER_DIR}/org.postgresql/ - - wget -nv https://repo1.maven.org/maven2/org/postgresql/postgresql/{{dbeaver.drivers.postgresl}}/postgresql-{{dbeaver.drivers.postgresl}}.jar -P ${DBEAVER_DRIVER_DIR}/org.postgresql/ - - wget -nv https://repo1.maven.org/maven2/org/postgresql/postgresql/{{dbeaver.drivers.postgresl}}/postgresql-{{dbeaver.drivers.postgresl}}.pom -P ${DBEAVER_DRIVER_DIR}/org.postgresql/ - - mkdir -p ${DBEAVER_DRIVER_DIR}/net.postgis/ - - wget -nv https://repo1.maven.org/maven2/net/postgis/postgis-jdbc/{{dbeaver.drivers.postgis_jdbc}}/postgis-jdbc-{{dbeaver.drivers.postgis_jdbc}}.jar -P ${DBEAVER_DRIVER_DIR}/net.postgis/ - - wget -nv https://repo1.maven.org/maven2/net/postgis/postgis-jdbc/{{dbeaver.drivers.postgis_jdbc}}/postgis-jdbc-{{dbeaver.drivers.postgis_jdbc}}.pom -P ${DBEAVER_DRIVER_DIR}/net.postgis/ - - wget -nv https://repo1.maven.org/maven2/net/postgis/postgis-geometry/{{dbeaver.drivers.postgis_geometry}}/postgis-geometry-{{dbeaver.drivers.postgis_geometry}}.jar -P ${DBEAVER_DRIVER_DIR}/net.postgis/ - - wget -nv https://repo1.maven.org/maven2/net/postgis/postgis-geometry/{{dbeaver.drivers.postgis_geometry}}/postgis-geometry-{{dbeaver.drivers.postgis_geometry}}.pom -P ${DBEAVER_DRIVER_DIR}/net.postgis/ - - mkdir -p ${DBEAVER_DRIVER_DIR}/com.github.waffle/ - - wget -nv https://repo1.maven.org/maven2/com/github/waffle/waffle-jna/{{dbeaver.drivers.waffle_jna}}/waffle-jna-{{dbeaver.drivers.waffle_jna}}.jar -P ${DBEAVER_DRIVER_DIR}/com.github.waffle/ - - wget -nv https://repo1.maven.org/maven2/com/github/waffle/waffle-jna/{{dbeaver.drivers.waffle_jna}}/waffle-jna-{{dbeaver.drivers.waffle_jna}}.pom -P ${DBEAVER_DRIVER_DIR}/com.github.waffle/ - - mv /opt/build/dbeaver_drivers_config.xml /usr/share/dbeaver-ce/drivers-config.xml - - echo "-Ddbeaver.drivers.configuration-file=/usr/share/dbeaver-ce/drivers-config.xml" >> /usr/share/dbeaver-ce/dbeaver.ini - - echo "-Djava.security.auth.login.config=/etc/jaas.conf" >> /usr/share/dbeaver-ce/dbeaver.ini - - ls -alh ${DBEAVER_DRIVER_DIR}/* - - echo "... successfully installed DBeaver drivers"; - # Install RStudio - - echo "Installing RStudio..." - - /opt/build/download_and_install_deb.sh rstudio - - if [ "$(which rstudio)" = "" ]; then echo "Could not install RStudio!"; exit 1; else echo "... successfully installed RStudio"; fi - # Install bats - - echo "Installing bats..." 
- - git clone https://github.com/bats-core/bats-core /opt/bats/bats-core - - git clone https://github.com/bats-core/bats-support /opt/bats/bats-support - - git clone https://github.com/bats-core/bats-assert /opt/bats/bats-assert - - git clone https://github.com/bats-core/bats-file /opt/bats/bats-file - - /opt/bats/bats-core/install.sh /usr/local - - if [ "$(which bats)" = "" ]; then echo "Could not install bats!"; exit 1; else echo "... successfully installed bats"; fi - # Install pyenv and pyenv-virtualenv - - echo "Installing pyenv..." - - export PYENV_ROOT="/opt/pyenv" - - rm -rf $PYENV_ROOT 2> /dev/null - - PATH="$PYENV_ROOT/bin:$PATH" # NB. pyenv needs to be at the beginning of the path so that it can override other python versions - - curl -s -S -L https://raw.githubusercontent.com/pyenv/pyenv-installer/master/bin/pyenv-installer | bash - - pyenv doctor || die "Could not configure pyenv!" - - if [ "$(which pyenv)" = "" ]; then echo "Could not install pyenv!"; exit 1; else echo "... successfully installed pyenv"; fi - # Get icons for webapps - - echo "Downloading icons..." - - mkdir -p /opt/icons - - wget https://raw.githubusercontent.com/hackmdio/codimd/develop/public/favicon.png -O /opt/icons/codimd.png || die "Could not find CodiMD icon!" - - wget https://about.gitlab.com/images/press/logo/png/gitlab-icon-rgb.png -O /opt/icons/gitlab.png || die "Could not find GitLab icon!" - - # Manually update ClamAV signature database - - echo ">=== $(date +%s) Updating ClamAV database ===<" - # We do not update ClamAV at build time since these definitions need to be updated at deployment times - - systemctl stop clamav-freshclam - - systemctl disable clamav-freshclam - # As clamav-unofficial-sigs has not been updated since 2014, we need the following fixes: - # Calls to SecuriteInfo fail with 'curl: (6) Could not resolve host: clamav.securiteinfo.com' - # - disable this source as suggested here: https://bugs.launchpad.net/ubuntu/+source/clamav-unofficial-sigs/+bug/1643172 - - sed -i '/si_dbs=/,/^"$/d' /usr/share/clamav-unofficial-sigs/conf.d/00-clamav-unofficial-sigs.conf - - sed -i '/si_update_hours/d' /usr/share/clamav-unofficial-sigs/conf.d/00-clamav-unofficial-sigs.conf - # Calls to MalwarePatrol fail with 'MalwarePatrol mbl.ndb database integrity tested BAD - SKIPPING' - # - disable this source as suggested here: https://www.mail-archive.com/pkg-clamav-devel@lists.alioth.debian.org/msg05014.html - - sed -i '/mbl_dbs=/,/^"$/d' /usr/share/clamav-unofficial-sigs/conf.d/00-clamav-unofficial-sigs.conf - # Update unofficial signatures (strangely this gives no output in the cloud-init logs) - - clamav-unofficial-sigs 2>&1 - - clamav-unofficial-sigs -i - - # Install Spark and Hadoop - approximately 10 minutes - - echo ">=== $(date +%s) Installing Spark/Hadoop ===<" - - shui install --latest --target /opt - - mv /opt/spark* /opt/spark - - PATH=$PATH:/opt/spark/bin - - if [ "$(which spark-shell)" = "" ]; then echo "Could not install Spark/Hadoop!"; exit 1; else echo "... 
successfully installed Spark/Hadoop"; fi - - # Install Julia and packages - approximately 20 minutes - - echo ">=== $(date +%s) Installing Julia and packages ===<" - - jill install stable --confirm --install_dir /opt/julia - - JULIA_BASE_DIR=$(ls -d /opt/julia/julia*) - - if [ "$(which julia)" = "" ]; then echo "Could not install Julia!"; exit 1; fi - - export JULIA_PACKAGES="[\"$(sed '/^$/d' /opt/build/packages/packages-julia.list | paste -s -d '|' | sed 's/|/", "/g')\"]" - # Create a global Julia depot for package installation - - export JULIA_DEPOT_PATH="${JULIA_BASE_DIR}/depot/" - - mkdir -p ${JULIA_DEPOT_PATH} - - sed -i "/DEPOT_PATH/d" ${JULIA_BASE_DIR}/etc/julia/startup.jl - - echo "push!(DEPOT_PATH, \"${JULIA_DEPOT_PATH}\")" >> ${JULIA_BASE_DIR}/etc/julia/startup.jl - # Set some Julia environment variables before installing/building the packages - - export JULIA_COPY_STACKS=1 - - export PYTHON="$(which python3)" # link Julia against system Python 3 - - julia -e "using Pkg; Pkg.add($JULIA_PACKAGES); for package in $JULIA_PACKAGES; Pkg.build(package); end" - # Ensure that Julia depot is globally readable - - chmod -R o=u,o-w ${JULIA_BASE_DIR}/depot/ - # Write Julia environment variables to global .bashrc - - echo "export JULIA_COPY_STACKS=${JULIA_COPY_STACKS}" >> /etc/bash.bashrc - - echo "export JULIA_DEPOT_PATH=~/.julia:${JULIA_DEPOT_PATH}" >> /etc/bash.bashrc # ensure that each user's DEPOT_PATH will be the usual default (~/.julia) followed by the global depot - - echo "export PYTHON=${PYTHON}" >> /etc/bash.bashrc - # Check for missing packages - - MISSING_JULIA_PACKAGES=$(julia -e "for package in $JULIA_PACKAGES; try; abspath(joinpath(dirname(Base.find_package(package)))); @eval using \$(Symbol(package)); catch e; println(package); end; end;") - - if [ "$MISSING_JULIA_PACKAGES" ]; then echo "Could not install Julia packages - $MISSING_JULIA_PACKAGES"; exit 1; else echo "... successfully installed Julia"; fi - # Add the Julia kernel to Jupyter - - mv /root/.local/share/jupyter ${JULIA_BASE_DIR}/share - - jupyter kernelspec install ${JULIA_BASE_DIR}/share/jupyter/kernels/julia-* || die "Could not install Julia kernel!" 
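For context on the depot arrangement above: every Julia package is installed once into a shared, read-only depot under the Julia install directory, and each user's JULIA_DEPOT_PATH then lists their own writable ~/.julia first and the shared depot second. A minimal sketch of what a user shell ends up with, assuming an illustrative version directory (the real one is whatever /opt/julia/julia* resolves to):

    # Sketch only - the version directory below is an assumption, not a value from this file.
    JULIA_BASE_DIR="/opt/julia/julia-1.8.5"
    export JULIA_COPY_STACKS=1
    export JULIA_DEPOT_PATH=~/.julia:"${JULIA_BASE_DIR}/depot/"

    # Packages resolve from the shared depot; anything the user adds lands in ~/.julia,
    # which is the only writable entry because it is listed first.
    julia -e 'println(DEPOT_PATH)'
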
- - # Install python versions - - /opt/build/pyenv/build_requirements.py /opt/build/pyenv/packages-python.yaml - - | - for PYTHON_VERSION in $(ls /opt/build/python-*-requirements.txt | cut -d '-' -f 2 | sort -V); do - /opt/build/pyenv/install.sh $PYTHON_VERSION || die "Could not install Python ${PYTHON_VERSION}!"; - done - - # Configure pyenv: use a local PYENV_ROOT with the global versions symlinked into it - - echo ">=== $(date +%s) Configuring pyenv ===<" - - echo "Installed python versions:" - - du --si -d 1 ${PYENV_ROOT}/versions - - export PYENV_LOCAL_ROOT='${HOME}/.pyenv' - - echo "Configuring pyenv initialisation" - - echo '# Set up pyenv' >> /etc/bash.bashrc - - echo "export PYENV_ROOT=\"${PYENV_LOCAL_ROOT}\"" >> /etc/bash.bashrc - - echo "for version in $(ls ${PYENV_ROOT}/versions | xargs); do [ -d \"${PYENV_LOCAL_ROOT}/versions/\${version}\" ] || (mkdir -p ${PYENV_LOCAL_ROOT}/versions/\${version} && ln -s ${PYENV_ROOT}/versions/\${version}/* ${PYENV_LOCAL_ROOT}/versions/\${version}); done" >> /etc/bash.bashrc - # Setup pyenv executables - - echo 'eval "$(pyenv init --path)"' >> /etc/bash.bashrc - - echo 'eval "$(pyenv init - --no-rehash)"' >> /etc/bash.bashrc - - echo 'eval "$(pyenv virtualenv-init -)"' >> /etc/bash.bashrc - - echo "pyenv rehash" >> /etc/bash.bashrc - - echo "pyenv global $(ls ${PYENV_ROOT}/versions/ | sort -V | tail -n 2 | head -n 1)" >> /etc/bash.bashrc - # Create a centralised Python shim for use by Jupyter - - echo '#!/usr/bin/env bash' > /opt/pyenv/bin/python-for-jupyter - - echo 'exec "$(which python)" "$@"' >> /opt/pyenv/bin/python-for-jupyter - - chmod ugo+x /opt/pyenv/bin/python-for-jupyter - # Add a Jupyter kernel called 'python3' which will overwrite the native kernel - - python3 -m ipykernel install --name "python3" --display-name "Python 3 (active version)" - - | - KERNEL_PATH="/usr/local/share/jupyter/kernels/python3/kernel.json" - if [ -e "${KERNEL_PATH}" ]; then - python3 -c "import json; kernel = json.load(open('${KERNEL_PATH}', 'r')); kernel['argv'][0] = '/opt/pyenv/bin/python-for-jupyter'; kernel['display_name'] = 'Python 3 (active version)'; json.dump(kernel, open('${KERNEL_PATH}', 'w'), indent=1)" - fi - - # Install any missing R packages - approximately 1 hr - - echo ">=== $(date +%s) Installing R packages ===<" - - export HOME=/root # this is needed for the installation of the 'credentials' package - - echo '# Set up RStudio' >> /etc/bash.bashrc - - echo "export RSTUDIO_WHICH_R=/usr/bin/R" >> /etc/bash.bashrc # this ensures that all users will pick up system R when running RStudio - # Fix for GDAL issue in sf: https://github.com/r-spatial/sf/issues/1899 - - Rscript -e "install.packages('devtools', quiet = TRUE); devtools::install_github('r-spatial/sf', quiet = TRUE)" - # Install CRAN packages and terminate if any are missing - - export CRAN_PACKAGES="\"$(sed '/^$/d' /opt/build/packages/packages-r-cran.list | paste -s -d '|' | sed 's/|/", "/g')\"" - - echo "Preparing to install $(echo $CRAN_PACKAGES | wc -w) CRAN packages\n${CRAN_PACKAGES}" - - Rscript -e "options('Ncpus' = parallel::detectCores()); requested_pkgs <- c($CRAN_PACKAGES); remaining_pkgs <- requested_pkgs[!(requested_pkgs %in% installed.packages()[,'Package'])]; if(length(remaining_pkgs)) { print(paste('Installing', paste(remaining_pkgs, collapse=', '))); install.packages(remaining_pkgs, quiet = TRUE) } else { print('No packages left to install') }" - - MISSING_CRAN_PACKAGES=$(Rscript -e "requested_pkgs <- c($CRAN_PACKAGES); missing_pkgs <- requested_pkgs[!(requested_pkgs %in% 
installed.packages()[,'Package'])]; print(missing_pkgs)" | sed "s/character(0)//") - - if [ "$MISSING_CRAN_PACKAGES" ]; then echo "Could not install CRAN packages\n${MISSING_CRAN_PACKAGES}"; exit 1; else echo "... successfully installed CRAN packages"; fi - # Install BioConductor packages and terminate if any are missing - - export BIOCONDUCTOR_PACKAGES="\"$(sed '/^$/d' /opt/build/packages/packages-r-bioconductor.list | paste -s -d '|' | sed 's/|/", "/g')\"" - - echo "Preparing to install $(echo $BIOCONDUCTOR_PACKAGES | wc -w) BioConductor packages\n${BIOCONDUCTOR_PACKAGES}" - - Rscript -e "options('Ncpus' = parallel::detectCores()); requested_pkgs <- c($BIOCONDUCTOR_PACKAGES); remaining_pkgs <- requested_pkgs[!(requested_pkgs %in% installed.packages()[,'Package'])]; if(length(remaining_pkgs)) { print(paste('Installing', paste(remaining_pkgs, collapse=', '))); BiocManager::install(remaining_pkgs, quiet = TRUE) } else { print('No packages left to install') }" - - MISSING_BIOCONDUCTOR_PACKAGES=$(Rscript -e "requested_pkgs <- c($BIOCONDUCTOR_PACKAGES); missing_pkgs <- requested_pkgs[!(requested_pkgs %in% installed.packages()[,'Package'])]; print(missing_pkgs)" | sed "s/character(0)//") - - if [ "$MISSING_BIOCONDUCTOR_PACKAGES" ]; then echo "Could not install Bioconductor packages\n${MISSING_BIOCONDUCTOR_PACKAGES}"; exit 1; else echo "... successfully installed BioConductor packages"; fi - # Add the R kernel to Jupyter - - R_VERSION=$(R --version | head -n 1 | cut -d ' ' -f3) - - | - sed -i "s|\"display_name\":.*\R.*|\"display_name\": \"R ${R_VERSION}\",|" /usr/local/lib/R/site-library/IRkernel/kernelspec/kernel.json - - ln -s /usr/local/lib/R/site-library/IRkernel/kernelspec /usr/local/lib/R/site-library/IRkernel/R${R_VERSION} - - jupyter kernelspec install /usr/local/lib/R/site-library/IRkernel/R${R_VERSION} || die "Could not install R kernel!"; - - # Clean up any build artifacts - - echo ">=== $(date +%s) Cleaning up the build environment ===<" - - USED_BYTES_OLD=$(df / | tail -n 1 | awk '{printf $3}') - # Remove netcat (potential security issue) [Note this will remove the 'ubuntu-minimal' metapackage but does not remove any other real packages] - # Remove xscreensaver (unnecessary) - - apt-get remove -y netcat-openbsd xscreensaver - # Fix any broken packages then clean and remove any unused packages - - apt-get --fix-broken install -y - - apt-get autoclean -y - - apt-get autoremove -y --purge - - apt-get clean - # Remove temporary files - - tmpreaper 10m /tmp/ /var/tmp/ # remove temporary files that have not been accessed in 10 minutes - - find /root/ -mindepth 1 -delete # remove all files from root's home directory including dotfiles - # Log space saved - - USED_BYTES_NEW=$(df / | tail -n 1 | awk '{printf $3}') - - echo "Successfully reclaimed $(numfmt --to=iec-i --suffix=B $(($USED_BYTES_OLD - $USED_BYTES_NEW))) of disk space" - - # Check for successful installations - - echo ">=== $(date +%s) Checking environment configuration ===<" - # Check that Jupyter kernels are correctly configured - - echo "Checking installed Jupyter kernels" - - jupyter kernelspec list - # Set PATH to the current working version which contains all installed packages - # Append ~/.local/bin and ~/bin so that any executables that are installed there (eg. 
by pip) can be used - # We do this at the end of the script so that - # - we know this is the PATH that worked when we checked for each package - # - we only get one entry in /etc/bash.bashrc rather than several with "last-one-wins" - - PATH="$PATH:\$HOME/.local/bin:\$HOME/bin" - - echo "Setting PATH to '${PATH}'" - - sed -i "s|^export PATH=.*|export PATH=${PATH}|" /etc/bash.bashrc - # Run installation tests - - python3 /opt/build/check_installation.py || die "Installation check failed!"; - -final_message: "System setup through cloud-init is finished. Configuration took $UPTIME seconds" - -# Shutdown at the end of the job to save on running costs -power_state: - mode: poweroff - message: "Shutting down after cloud-init is finished" - timeout: 30 - condition: true diff --git a/deployment/secure_research_desktop/cloud_init/cloud-init-buildimage-ubuntu-2004.mustache.yaml b/deployment/secure_research_desktop/cloud_init/cloud-init-buildimage-ubuntu-2004.mustache.yaml deleted file mode 100644 index 3548c2870e..0000000000 --- a/deployment/secure_research_desktop/cloud_init/cloud-init-buildimage-ubuntu-2004.mustache.yaml +++ /dev/null @@ -1,410 +0,0 @@ -#cloud-config - -# Set locale and timezone -locale: en_GB.UTF-8 -timezone: Etc/UTC - -# Update package database on first boot -package_update: true - -# Upgrade installed packages on first boot -package_upgrade: true - -apt: - # Append to the existing /etc/apt/sources.list - preserve_sources_list: true - - # Add repositories - sources: - dbeaver.list: - source: "deb https://dbeaver.io/debs/dbeaver-ce /" - keyid: 98F5A7CC1ABE72AC3852A007D33A1BD725ED047D # DBeaver Co. (DBeaver Co. GPG key) - - microsoft-general.list: - source: "deb https://packages.microsoft.com/ubuntu/20.04/prod focal main" - keyid: BC528686B50D79E339D3721CEB3E94ADBE1229CF # Microsoft (Release signing) - - microsoft-vscode.list: - source: "deb [arch=amd64] https://packages.microsoft.com/repos/vscode stable main" - keyid: BC528686B50D79E339D3721CEB3E94ADBE1229CF # Microsoft (Release signing) - - nvidia-cuda.list: - source: "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64 /" - keyid: EB693B3035CD5710E231E123A4B469963BF863CC # cudatools - - nvidia-ml.list: - source: "deb http://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu2004/x86_64 /" - keyid: AE09FE4BBD223A84B2CCFCE3F60F4B3D7FA2AF80 # cudatools - - postgresql.list: - source: "deb http://apt.postgresql.org/pub/repos/apt/ focal-pgdg main" - keyid: B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8 # PostgreSQL Debian Repository - - qgis.list: - source: "deb https://ubuntu.qgis.org/ubuntu focal main" - keyid: 2D7E3441A707FDB3E7059441D155B8E6A419C5BE # QGIS Archive Automatic Signing Key (2022-2027) - - r-base-version.list: - source: "deb https://cloud.r-project.org/bin/linux/ubuntu focal-cran40/" - keyid: E298A3A825C0D65DFD57CBB651716619E084DAB9 # Michael Rutter - -write_files: - - path: "/etc/jaas.conf" - permissions: "0444" - content: | - {{jaas.conf}} - - - path: "/opt/build/azuredatastudio.debinfo" - permissions: "0400" - content: | - {{deb-azuredatastudio.version}} - - - path: "/opt/build/check_installation.py" - permissions: "0400" - content: | - {{check_installation.py}} - - - path: "/opt/build/dbeaver_drivers_config.xml" - permissions: "0444" - content: | - {{dbeaver_drivers_config.mustache.xml}} - - - path: "/opt/build/deprovision_vm.sh" - permissions: "0500" - content: | - {{deprovision_vm.sh}} - - - path: "/opt/build/download_and_install_deb.sh" - permissions: "0500" - content: | - 
{{download_and_install_deb.sh}} - - - path: "/opt/build/packages/packages-julia.list" - permissions: "0444" - content: | - {{packages-julia.list}} - - - path: "/opt/build/packages/packages-r-bioconductor.list" - permissions: "0444" - content: | - {{packages-r-bioconductor.list}} - - - path: "/opt/build/packages/packages-r-cran.list" - permissions: "0444" - content: | - {{packages-r-cran.list}} - - - path: "/opt/build/packages/packages-python-system.txt" - permissions: "0444" - content: | - {{packages-python-system.txt}} - - - path: "/opt/build/pyenv/build_requirements.py" - permissions: "0500" - content: | - {{pyenv_build_requirements.py}} - - - path: "/opt/build/pyenv/install.sh" - permissions: "0500" - content: | - {{pyenv_install.sh}} - - - path: "/opt/build/pyenv/packages-python.yaml" - permissions: "0400" - content: | - {{packages-python.yaml}} - - - path: "/opt/build/rstudio.debinfo" - permissions: "0400" - content: | - {{deb-rstudio-focal.version}} - - - path: "/opt/monitoring/analyse_build.py" - permissions: "0755" - content: | - {{analyse_build.py}} - - - path: "/opt/monitoring/deprovision.log" - permissions: "0600" - content: | - # Deprovisioning log - - - path: "/usr/share/applications/pycharm.desktop" - permissions: "0644" - content: | - {{pycharm.desktop}} - -# Packages to install with snap -snap: - commands: - - snap install {{packages-snap.list}} - -# List of packages to install with apt-get -packages: - - {{packages-apt.list}} - -# Set the NTP server -# By default we use Google's NTP servers which are incompatible with other servers due to leap-second smearing -ntp: - enabled: true - pools: - {{#time.ntp.serverAddresses}} - - {{.}} - {{/time.ntp.serverAddresses}} - -# List of commands to run using `/bin/sh` -# When changing these lines try to ensure that everything is checked as close as possible to the point when it is installed/configured. -# If any of the installation/configuration checks fail then end the cloud-init process immediately by calling `exit 1`. -# This allows us to (i) not waste time completing a build once a step has failed and (ii) easily tell when and why a build failed using the analyse_build.py script. -runcmd: - - | - # Utility function to send a message to stderr then return an error code - die() { - echo "$1" >&2 - exit "${2-1}" - } - - sleep 10 # ensure that run commands are cleanly time-separated from other cloud-init commands - - echo ">=== $(date +%s) Preparing system for installation ===<" - # Allowing cloud.rproject.org would give unrestricted access to CRAN - # As we do not want this, we remove this repository before continuing deployment - - echo "Removing R updates repository..." - - rm /etc/apt/sources.list.d/r-base-version.list - # Remove any unused auto-installed packages - - apt update - - apt-get autoclean -y - - apt-get autoremove -y --purge - # Install python packages used in the rest of the build - - echo "Installing python packages..." - - pip3 install --upgrade -r /opt/build/packages/packages-python-system.txt - - pip3 list --format columns - # Log system performance during the installation and record it each minute - - echo "Setting up glances..." - - nohup glances --stdout-csv now,mem.used,mem.total,cpu.idle --time 30 > /opt/monitoring/performance_log.csv & - # Suppress apt prompts and warning messages - - echo "Suppressing apt prompts..." 
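On the performance logging configured just above: glances writes one sample every 30 seconds (the --time 30 argument) to /opt/monitoring/performance_log.csv with the fields now, mem.used, mem.total and cpu.idle. A rough, illustrative way to pull a peak-memory figure out of that file afterwards (this assumes mem.used is reported in bytes, which is not stated in this file):

    # Sketch: report the peak of the mem.used column (second field) in GiB.
    # A non-numeric header row, if present, is harmless because it coerces to 0.
    awk -F',' '$2 + 0 > max { max = $2 + 0 }
               END { printf "peak mem.used: %.2f GiB\n", max / (1024 ^ 3) }' \
        /opt/monitoring/performance_log.csv
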
- - export DEBIAN_FRONTEND=noninteractive - # Add an entry for PATH to default bashrc - - echo "Adding PATH setting to default bashrc" - - echo '# Set path' >> /etc/bash.bashrc - - echo "export PATH=$PATH" >> /etc/bash.bashrc - - echo '' >> /etc/bash.bashrc - - - echo ">=== $(date +%s) Installing additional deb/source packages ===<" - # Add snap to path - - echo "Adding snap to path..." - - PATH=$PATH:/snap/bin - # Microsoft ODBC tools - - echo "Installing Microsoft ODBC tools..." - - sleep 10 # wait for possible dpkg locks - - ACCEPT_EULA=Y apt-get install -y msodbcsql17 mssql-tools - - PATH=$PATH:/opt/mssql-tools/bin - - echo "export DOTNET_CLI_TELEMETRY_OPTOUT=1" >> /etc/bash.bashrc - - if [ "$(which sqlcmd)" = "" ]; then echo "Could not install Microsoft ODBC tools!"; exit 1; else echo "... successfully installed Microsoft ODBC tools"; fi - # Azure Data Studio - - echo "Installing Azure Data Studio..." - - /opt/build/download_and_install_deb.sh azuredatastudio - - if [ "$(which azuredatastudio)" = "" ]; then echo "Could not install Azure Data Studio!"; exit 1; else echo "... successfully installed Azure Data Studio"; fi - # Install drivers from maven - - echo "Installing DBeaver drivers..." - - DBEAVER_DRIVER_DIR="/usr/share/dbeaver-ce/drivers/maven/maven-central" - # Note that the filenames specified here have to be kept synchronised with the names in the dbeaver_drivers_config.xml file. - # Adding new drivers therefore involves changing both this file and the XML file. - - mkdir -p ${DBEAVER_DRIVER_DIR}/com.microsoft.sqlserver/ - - wget -nv https://repo1.maven.org/maven2/com/microsoft/sqlserver/mssql-jdbc/{{dbeaver.drivers.mssql_jdbc}}/mssql-jdbc-{{dbeaver.drivers.mssql_jdbc}}.jar -P ${DBEAVER_DRIVER_DIR}/com.microsoft.sqlserver/ - - wget -nv https://repo1.maven.org/maven2/com/microsoft/sqlserver/mssql-jdbc/{{dbeaver.drivers.mssql_jdbc}}/mssql-jdbc-{{dbeaver.drivers.mssql_jdbc}}.pom -P ${DBEAVER_DRIVER_DIR}/com.microsoft.sqlserver/ - - mkdir -p ${DBEAVER_DRIVER_DIR}/org.postgresql/ - - wget -nv https://repo1.maven.org/maven2/org/postgresql/pgjdbc-core-parent/{{dbeaver.drivers.pgjdbc}}/pgjdbc-core-parent-{{dbeaver.drivers.pgjdbc}}.pom -P ${DBEAVER_DRIVER_DIR}/org.postgresql/ - - wget -nv https://repo1.maven.org/maven2/org/postgresql/pgjdbc-versions/{{dbeaver.drivers.pgjdbc}}/pgjdbc-versions-{{dbeaver.drivers.pgjdbc}}.pom -P ${DBEAVER_DRIVER_DIR}/org.postgresql/ - - wget -nv https://repo1.maven.org/maven2/org/postgresql/postgresql/{{dbeaver.drivers.postgresql}}/postgresql-{{dbeaver.drivers.postgresql}}.jar -P ${DBEAVER_DRIVER_DIR}/org.postgresql/ - - wget -nv https://repo1.maven.org/maven2/org/postgresql/postgresql/{{dbeaver.drivers.postgresql}}/postgresql-{{dbeaver.drivers.postgresql}}.pom -P ${DBEAVER_DRIVER_DIR}/org.postgresql/ - - mkdir -p ${DBEAVER_DRIVER_DIR}/net.postgis/ - - wget -nv https://repo1.maven.org/maven2/net/postgis/postgis-jdbc/{{dbeaver.drivers.postgis_jdbc}}/postgis-jdbc-{{dbeaver.drivers.postgis_jdbc}}.jar -P ${DBEAVER_DRIVER_DIR}/net.postgis/ - - wget -nv https://repo1.maven.org/maven2/net/postgis/postgis-jdbc/{{dbeaver.drivers.postgis_jdbc}}/postgis-jdbc-{{dbeaver.drivers.postgis_jdbc}}.pom -P ${DBEAVER_DRIVER_DIR}/net.postgis/ - - wget -nv https://repo1.maven.org/maven2/net/postgis/postgis-geometry/{{dbeaver.drivers.postgis_geometry}}/postgis-geometry-{{dbeaver.drivers.postgis_geometry}}.jar -P ${DBEAVER_DRIVER_DIR}/net.postgis/ - - wget -nv 
https://repo1.maven.org/maven2/net/postgis/postgis-geometry/{{dbeaver.drivers.postgis_geometry}}/postgis-geometry-{{dbeaver.drivers.postgis_geometry}}.pom -P ${DBEAVER_DRIVER_DIR}/net.postgis/ - - mkdir -p ${DBEAVER_DRIVER_DIR}/com.github.waffle/ - - wget -nv https://repo1.maven.org/maven2/com/github/waffle/waffle-jna/{{dbeaver.drivers.waffle_jna}}/waffle-jna-{{dbeaver.drivers.waffle_jna}}.jar -P ${DBEAVER_DRIVER_DIR}/com.github.waffle/ - - wget -nv https://repo1.maven.org/maven2/com/github/waffle/waffle-jna/{{dbeaver.drivers.waffle_jna}}/waffle-jna-{{dbeaver.drivers.waffle_jna}}.pom -P ${DBEAVER_DRIVER_DIR}/com.github.waffle/ - - mv /opt/build/dbeaver_drivers_config.xml /usr/share/dbeaver-ce/drivers-config.xml - - echo "-Ddbeaver.drivers.configuration-file=/usr/share/dbeaver-ce/drivers-config.xml" >> /usr/share/dbeaver-ce/dbeaver.ini - - echo "-Djava.security.auth.login.config=/etc/jaas.conf" >> /usr/share/dbeaver-ce/dbeaver.ini - - ls -alh ${DBEAVER_DRIVER_DIR}/* - - echo "... successfully installed DBeaver drivers"; - # Install RStudio - - echo "Installing RStudio..." - - /opt/build/download_and_install_deb.sh rstudio - - if [ "$(which rstudio)" = "" ]; then echo "Could not install RStudio!"; exit 1; else echo "... successfully installed RStudio"; fi - # Install bats - - echo "Installing bats..." - - git clone https://github.com/bats-core/bats-core /opt/bats/bats-core - - git clone https://github.com/bats-core/bats-support /opt/bats/bats-support - - git clone https://github.com/bats-core/bats-assert /opt/bats/bats-assert - - git clone https://github.com/bats-core/bats-file /opt/bats/bats-file - - /opt/bats/bats-core/install.sh /usr/local - - if [ "$(which bats)" = "" ]; then echo "Could not install bats!"; exit 1; else echo "... successfully installed bats"; fi - # Install pyenv and pyenv-virtualenv - - echo "Installing pyenv..." - - export PYENV_ROOT="/opt/pyenv" - - rm -rf $PYENV_ROOT 2> /dev/null - - PATH="$PYENV_ROOT/bin:$PATH" # NB. pyenv needs to be at the beginning of the path so that it can override other python versions - - curl -s -S -L https://raw.githubusercontent.com/pyenv/pyenv-installer/master/bin/pyenv-installer | bash - - pyenv doctor || die "Could not configure pyenv!" - - if [ "$(which pyenv)" = "" ]; then echo "Could not install pyenv!"; exit 1; else echo "... successfully installed pyenv"; fi - # Get icons for webapps - - echo "Downloading icons..." - - mkdir -p /opt/icons - - wget https://raw.githubusercontent.com/hackmdio/codimd/develop/public/favicon.png -O /opt/icons/codimd.png || die "Could not find CodiMD icon!" - - wget https://about.gitlab.com/images/press/logo/png/gitlab-icon-rgb.png -O /opt/icons/gitlab.png || die "Could not find GitLab icon!" 
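The commands above follow the convention stated at the top of the file: every install is verified at the point it happens, either with '... || die "message"' or with a which-based test that exits immediately. A self-contained sketch of that pattern, using a placeholder package name rather than anything from this file:

    #!/bin/sh
    # Sketch of the check-at-point-of-install convention used in these runcmd blocks.
    die() {
        echo "$1" >&2
        exit "${2-1}"
    }

    apt-get install -y some-tool || die "Could not install some-tool!"
    if [ "$(which some-tool)" = "" ]; then
        die "some-tool is not on the PATH!"
    else
        echo "... successfully installed some-tool"
    fi
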
- - # Manually update ClamAV signature database - - echo ">=== $(date +%s) Updating ClamAV database ===<" - # We do not update ClamAV at build time since these definitions need to be updated at deployment times - - systemctl stop clamav-freshclam - - systemctl disable clamav-freshclam - # As clamav-unofficial-sigs has not been updated since 2014, we need the following fixes: - # Calls to SecuriteInfo fail with 'curl: (6) Could not resolve host: clamav.securiteinfo.com' - # - disable this source as suggested here: https://bugs.launchpad.net/ubuntu/+source/clamav-unofficial-sigs/+bug/1643172 - - sed -i '/si_dbs=/,/^"$/d' /usr/share/clamav-unofficial-sigs/conf.d/00-clamav-unofficial-sigs.conf - - sed -i '/si_update_hours/d' /usr/share/clamav-unofficial-sigs/conf.d/00-clamav-unofficial-sigs.conf - # Calls to MalwarePatrol fail with 'MalwarePatrol mbl.ndb database integrity tested BAD - SKIPPING' - # - disable this source as suggested here: https://www.mail-archive.com/pkg-clamav-devel@lists.alioth.debian.org/msg05014.html - - sed -i '/mbl_dbs=/,/^"$/d' /usr/share/clamav-unofficial-sigs/conf.d/00-clamav-unofficial-sigs.conf - # Update unofficial signatures (strangely this gives no output in the cloud-init logs) - - clamav-unofficial-sigs 2>&1 - - clamav-unofficial-sigs -i - - # Install Spark and Hadoop - approximately 10 minutes - - echo ">=== $(date +%s) Installing Spark/Hadoop ===<" - - shui install --latest --target /opt - - mv /opt/spark* /opt/spark - - PATH=$PATH:/opt/spark/bin - - if [ "$(which spark-shell)" = "" ]; then echo "Could not install Spark/Hadoop!"; exit 1; else echo "... successfully installed Spark/Hadoop"; fi - - # Install Julia and packages - approximately 20 minutes - - echo ">=== $(date +%s) Installing Julia and packages ===<" - - jill install stable --confirm --install_dir /opt/julia - - JULIA_BASE_DIR=$(ls -d /opt/julia/julia*) - - if [ "$(which julia)" = "" ]; then echo "Could not install Julia!"; exit 1; fi - - export JULIA_PACKAGES="[\"$(sed '/^$/d' /opt/build/packages/packages-julia.list | paste -s -d '|' | sed 's/|/", "/g')\"]" - # Create a global Julia depot for package installation - - export JULIA_DEPOT_PATH="${JULIA_BASE_DIR}/depot/" - - mkdir -p ${JULIA_DEPOT_PATH} - - sed -i "/DEPOT_PATH/d" ${JULIA_BASE_DIR}/etc/julia/startup.jl - - echo "push!(DEPOT_PATH, \"${JULIA_DEPOT_PATH}\")" >> ${JULIA_BASE_DIR}/etc/julia/startup.jl - # Set some Julia environment variables before installing/building the packages - - export JULIA_COPY_STACKS=1 - - export PYTHON="$(which python3)" # link Julia against system Python 3 - - julia -e "using Pkg; Pkg.add($JULIA_PACKAGES); for package in $JULIA_PACKAGES; Pkg.build(package); end" - # Ensure that Julia depot is globally readable - - chmod -R o=u,o-w ${JULIA_BASE_DIR}/depot/ - # Write Julia environment variables to global .bashrc - - echo "export JULIA_COPY_STACKS=${JULIA_COPY_STACKS}" >> /etc/bash.bashrc - - echo "export JULIA_DEPOT_PATH=~/.julia:${JULIA_DEPOT_PATH}" >> /etc/bash.bashrc # ensure that each user's DEPOT_PATH will be the usual default (~/.julia) followed by the global depot - - echo "export PYTHON=${PYTHON}" >> /etc/bash.bashrc - # Check for missing packages - - MISSING_JULIA_PACKAGES=$(julia -e "for package in $JULIA_PACKAGES; try; abspath(joinpath(dirname(Base.find_package(package)))); @eval using \$(Symbol(package)); catch e; println(package); end; end;") - - if [ "$MISSING_JULIA_PACKAGES" ]; then echo "Could not install Julia packages - $MISSING_JULIA_PACKAGES"; exit 1; else echo "... 
successfully installed Julia"; fi - # Add the Julia kernel to Jupyter - - mv /root/.local/share/jupyter ${JULIA_BASE_DIR}/share - - jupyter kernelspec install ${JULIA_BASE_DIR}/share/jupyter/kernels/julia-* || die "Could not install Julia kernel!" - - # Install python versions - - /opt/build/pyenv/build_requirements.py /opt/build/pyenv/packages-python.yaml - - | - for PYTHON_VERSION in $(ls /opt/build/python-*-requirements.txt | cut -d '-' -f 2 | sort -V); do - /opt/build/pyenv/install.sh $PYTHON_VERSION || die "Could not install Python ${PYTHON_VERSION}!"; - done - - # Configure pyenv: use a local PYENV_ROOT with the global versions symlinked into it - - echo ">=== $(date +%s) Configuring pyenv ===<" - - echo "Installed python versions:" - - du --si -d 1 ${PYENV_ROOT}/versions - - export PYENV_LOCAL_ROOT='${HOME}/.pyenv' - - echo "Configuring pyenv initialisation" - - echo '# Set up pyenv' >> /etc/bash.bashrc - - echo "export PYENV_ROOT=\"${PYENV_LOCAL_ROOT}\"" >> /etc/bash.bashrc - - echo "for version in $(ls ${PYENV_ROOT}/versions | xargs); do [ -d \"${PYENV_LOCAL_ROOT}/versions/\${version}\" ] || (mkdir -p ${PYENV_LOCAL_ROOT}/versions/\${version} && ln -s ${PYENV_ROOT}/versions/\${version}/* ${PYENV_LOCAL_ROOT}/versions/\${version}); done" >> /etc/bash.bashrc - # Setup pyenv executables - - echo 'eval "$(pyenv init --path)"' >> /etc/bash.bashrc - - echo 'eval "$(pyenv init - --no-rehash)"' >> /etc/bash.bashrc - - echo 'eval "$(pyenv virtualenv-init -)"' >> /etc/bash.bashrc - - echo "pyenv rehash" >> /etc/bash.bashrc - - echo "pyenv global $(ls ${PYENV_ROOT}/versions/ | sort -V | tail -n 2 | head -n 1)" >> /etc/bash.bashrc - # Create a centralised Python shim for use by Jupyter - - echo '#!/usr/bin/env bash' > /opt/pyenv/bin/python-for-jupyter - - echo 'exec "$(which python)" "$@"' >> /opt/pyenv/bin/python-for-jupyter - - chmod ugo+x /opt/pyenv/bin/python-for-jupyter - # Add a Jupyter kernel called 'python3' which will overwrite the native kernel - - python3 -m ipykernel install --name "python3" --display-name "Python 3 (active version)" - - | - KERNEL_PATH="/usr/local/share/jupyter/kernels/python3/kernel.json" - if [ -e "${KERNEL_PATH}" ]; then - python3 -c "import json; kernel = json.load(open('${KERNEL_PATH}', 'r')); kernel['argv'][0] = '/opt/pyenv/bin/python-for-jupyter'; json.dump(kernel, open('${KERNEL_PATH}', 'w'), indent=1)" - fi - - # Install any missing R packages - approximately 40 minutes - - echo ">=== $(date +%s) Installing R packages ===<" - - export HOME=/root # this is needed for the installation of the 'credentials' package - - echo '# Set up RStudio' >> /etc/bash.bashrc - - echo "export RSTUDIO_WHICH_R=/usr/bin/R" >> /etc/bash.bashrc # this ensures that all users will pick up system R when running RStudio - # Install CRAN packages and terminate if any are missing - - export CRAN_PACKAGES="\"$(sed '/^$/d' /opt/build/packages/packages-r-cran.list | paste -s -d '|' | sed 's/|/", "/g')\"" - - echo "Preparing to install $(echo $CRAN_PACKAGES | wc -w) CRAN packages\n${CRAN_PACKAGES}" - - Rscript -e "options('Ncpus' = parallel::detectCores()); requested_pkgs <- c($CRAN_PACKAGES); remaining_pkgs <- requested_pkgs[!(requested_pkgs %in% installed.packages()[,'Package'])]; if(length(remaining_pkgs)) { print(paste('Installing', paste(remaining_pkgs, collapse=', '))); install.packages(remaining_pkgs, quiet = TRUE) } else { print('No packages left to install') }" - - MISSING_CRAN_PACKAGES=$(Rscript -e "requested_pkgs <- c($CRAN_PACKAGES); missing_pkgs <- 
requested_pkgs[!(requested_pkgs %in% installed.packages()[,'Package'])]; print(missing_pkgs)" | sed "s/character(0)//") - - if [ "$MISSING_CRAN_PACKAGES" ]; then echo "Could not install CRAN packages\n${MISSING_CRAN_PACKAGES}"; exit 1; else echo "... successfully installed CRAN packages"; fi - # Install BioConductor packages and terminate if any are missing - - export BIOCONDUCTOR_PACKAGES="\"$(sed '/^$/d' /opt/build/packages/packages-r-bioconductor.list | paste -s -d '|' | sed 's/|/", "/g')\"" - - echo "Preparing to install $(echo $BIOCONDUCTOR_PACKAGES | wc -w) BioConductor packages\n${BIOCONDUCTOR_PACKAGES}" - - Rscript -e "options('Ncpus' = parallel::detectCores()); requested_pkgs <- c($BIOCONDUCTOR_PACKAGES); remaining_pkgs <- requested_pkgs[!(requested_pkgs %in% installed.packages()[,'Package'])]; if(length(remaining_pkgs)) { print(paste('Installing', paste(remaining_pkgs, collapse=', '))); BiocManager::install(remaining_pkgs, quiet = TRUE) } else { print('No packages left to install') }" - - MISSING_BIOCONDUCTOR_PACKAGES=$(Rscript -e "requested_pkgs <- c($BIOCONDUCTOR_PACKAGES); missing_pkgs <- requested_pkgs[!(requested_pkgs %in% installed.packages()[,'Package'])]; print(missing_pkgs)" | sed "s/character(0)//") - - if [ "$MISSING_BIOCONDUCTOR_PACKAGES" ]; then echo "Could not install Bioconductor packages\n${MISSING_BIOCONDUCTOR_PACKAGES}"; exit 1; else echo "... successfully installed BioConductor packages"; fi - # Add the R kernel to Jupyter - - R_VERSION=$(R --version | head -n 1 | cut -d ' ' -f3) - - ln -s /usr/local/lib/R/site-library/IRkernel/kernelspec /usr/local/lib/R/site-library/IRkernel/R${R_VERSION} - - | - sed -i "s|\"display_name\":.*\R.*|\"display_name\": \"R ${R_VERSION}\",|" /usr/local/lib/R/site-library/IRkernel/kernelspec/kernel.json - - jupyter kernelspec install /usr/local/lib/R/site-library/IRkernel/R${R_VERSION} || die "Could not install R kernel!"; - - # Clean up any build artifacts - - echo ">=== $(date +%s) Cleaning up the build environment ===<" - - USED_BYTES_OLD=$(df / | tail -n 1 | awk '{printf $3}') - # Remove netcat (potential security issue) [Note this will remove the 'ubuntu-minimal' metapackage but does not remove any other real packages] - # Remove xscreensaver (unnecessary) - - apt-get remove -y netcat-openbsd xscreensaver - # Fix any broken packages then clean and remove any unused packages - - apt-get --fix-broken install -y - - apt-get autoclean -y - - apt-get autoremove -y --purge - - apt-get clean - # Remove temporary files - - tmpreaper 10m /tmp/ /var/tmp/ # remove temporary files that have not been accessed in 10 minutes - - find /root/ -mindepth 1 -delete # remove all files from root's home directory including dotfiles - # Log space saved - - USED_BYTES_NEW=$(df / | tail -n 1 | awk '{printf $3}') - - echo "Successfully reclaimed $(numfmt --to=iec-i --suffix=B $(($USED_BYTES_OLD - $USED_BYTES_NEW))) of disk space" - - # Check for successful installations - - echo ">=== $(date +%s) Checking environment configuration ===<" - # Check that Jupyter kernels are correctly configured - - echo "Checking installed Jupyter kernels" - - jupyter kernelspec list - # Set PATH to the current working version which contains all installed packages - # Append ~/.local/bin and ~/bin so that any executables that are installed there (eg. 
by pip) can be used - # We do this at the end of the script so that - # - we know this is the PATH that worked when we checked for each package - # - we only get one entry in /etc/bash.bashrc rather than several with "last-one-wins" - - PATH="$PATH:\$HOME/.local/bin:\$HOME/bin" - - echo "Setting PATH to '${PATH}'" - - sed -i "s|^export PATH=.*|export PATH=${PATH}|" /etc/bash.bashrc - # Run installation tests - - python3 /opt/build/check_installation.py || die "Installation check failed!"; - -final_message: "System setup through cloud-init is finished. Configuration took $UPTIME seconds" - -# Shutdown at the end of the job to save on running costs -power_state: - mode: poweroff - message: "Shutting down after cloud-init is finished" - timeout: 30 - condition: true diff --git a/deployment/secure_research_desktop/cloud_init/cloud-init-buildimage-ubuntu-2204.mustache.yaml b/deployment/secure_research_desktop/cloud_init/cloud-init-buildimage-ubuntu-2204.mustache.yaml deleted file mode 100644 index 809e526e09..0000000000 --- a/deployment/secure_research_desktop/cloud_init/cloud-init-buildimage-ubuntu-2204.mustache.yaml +++ /dev/null @@ -1,416 +0,0 @@ -#cloud-config - -# Set locale and timezone -locale: en_GB.UTF-8 -timezone: Etc/UTC - -# Update package database on first boot -package_update: true - -# Upgrade installed packages on first boot -package_upgrade: true - -apt: - # Append to the existing /etc/apt/sources.list - preserve_sources_list: true - - # Add repositories - sources: - dbeaver.list: - source: "deb https://dbeaver.io/debs/dbeaver-ce /" - keyid: 98F5A7CC1ABE72AC3852A007D33A1BD725ED047D # DBeaver Co. (DBeaver Co. GPG key) - - microsoft-general-2004.list: - source: "deb https://packages.microsoft.com/ubuntu/20.04/prod focal main" # needed for some packages eg. 
blobfuse - keyid: BC528686B50D79E339D3721CEB3E94ADBE1229CF # Microsoft (Release signing) - - microsoft-general.list: - source: "deb https://packages.microsoft.com/ubuntu/22.04/prod jammy main" - keyid: BC528686B50D79E339D3721CEB3E94ADBE1229CF # Microsoft (Release signing) - - microsoft-vscode.list: - source: "deb [arch=amd64] https://packages.microsoft.com/repos/vscode stable main" - keyid: BC528686B50D79E339D3721CEB3E94ADBE1229CF # Microsoft (Release signing) - - nvidia-cuda.list: - source: "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64 /" - keyid: EB693B3035CD5710E231E123A4B469963BF863CC # cudatools - - nvidia-ml.list: - source: "deb http://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu2004/x86_64 /" # there is no 22.04 repository at present - keyid: AE09FE4BBD223A84B2CCFCE3F60F4B3D7FA2AF80 # cudatools - - postgresql.list: - source: "deb http://apt.postgresql.org/pub/repos/apt/ jammy-pgdg main" - keyid: B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8 # PostgreSQL Debian Repository - - qgis.list: - source: "deb https://ubuntu.qgis.org/ubuntu jammy main" - keyid: 2D7E3441A707FDB3E7059441D155B8E6A419C5BE # QGIS Archive Automatic Signing Key (2022-2027) - - r-base-version.list: - source: "deb https://cloud.r-project.org/bin/linux/ubuntu jammy-cran40/" - keyid: E298A3A825C0D65DFD57CBB651716619E084DAB9 # Michael Rutter - -write_files: - - path: "/etc/jaas.conf" - permissions: "0444" - content: | - {{jaas.conf}} - - - path: "/opt/build/azuredatastudio.debinfo" - permissions: "0400" - content: | - {{deb-azuredatastudio.version}} - - - path: "/opt/build/check_installation.py" - permissions: "0400" - content: | - {{check_installation.py}} - - - path: "/opt/build/dbeaver_drivers_config.xml" - permissions: "0444" - content: | - {{dbeaver_drivers_config.mustache.xml}} - - - path: "/opt/build/deprovision_vm.sh" - permissions: "0500" - content: | - {{deprovision_vm.sh}} - - - path: "/opt/build/download_and_install_deb.sh" - permissions: "0500" - content: | - {{download_and_install_deb.sh}} - - - path: "/opt/build/packages/packages-julia.list" - permissions: "0444" - content: | - {{packages-julia.list}} - - - path: "/opt/build/packages/packages-r-bioconductor.list" - permissions: "0444" - content: | - {{packages-r-bioconductor.list}} - - - path: "/opt/build/packages/packages-r-cran.list" - permissions: "0444" - content: | - {{packages-r-cran.list}} - - - path: "/opt/build/packages/packages-python-system.txt" - permissions: "0444" - content: | - {{packages-python-system.txt}} - - - path: "/opt/build/pyenv/build_requirements.py" - permissions: "0500" - content: | - {{pyenv_build_requirements.py}} - - - path: "/opt/build/pyenv/install.sh" - permissions: "0500" - content: | - {{pyenv_install.sh}} - - - path: "/opt/build/pyenv/packages-python.yaml" - permissions: "0400" - content: | - {{packages-python.yaml}} - - - path: "/opt/build/rbase.debinfo" - permissions: "0400" - content: | - {{deb-rbase-jammy.version}} - - - path: "/opt/build/rstudio.debinfo" - permissions: "0400" - content: | - {{deb-rstudio-jammy.version}} - - - path: "/opt/monitoring/analyse_build.py" - permissions: "0755" - content: | - {{analyse_build.py}} - - - path: "/opt/monitoring/deprovision.log" - permissions: "0600" - content: | - # Deprovisioning log - - - path: "/usr/share/applications/pycharm.desktop" - permissions: "0644" - content: | - {{pycharm.desktop}} - -# Packages to install with snap -snap: - commands: - - snap install {{packages-snap.list}} - -# List of packages to install with 
apt-get -packages: - - {{packages-apt.list}} - -# Set the NTP server -# By default we use Google's NTP servers which are incompatible with other servers due to leap-second smearing -ntp: - enabled: true - pools: - {{#time.ntp.serverAddresses}} - - {{.}} - {{/time.ntp.serverAddresses}} - -# List of commands to run using `/bin/sh` -# When changing these lines try to ensure that everything is checked as close as possible to the point when it is installed/configured. -# If any of the installation/configuration checks fail then end the cloud-init process immediately by calling `exit 1`. -# This allows us to (i) not waste time completing a build once a step has failed and (ii) easily tell when and why a build failed using the analyse_build.py script. -runcmd: - - | - # Utility function to send a message to stderr then return an error code - die() { - echo "$1" >&2 - exit "${2-1}" - } - - sleep 10 # ensure that run commands are cleanly time-separated from other cloud-init commands - - echo ">=== $(date +%s) Preparing system for installation ===<" - # Allowing cloud.rproject.org would give unrestricted access to CRAN - # As we do not want this, we remove this repository before continuing deployment - - echo "Removing R updates repository..." - - rm /etc/apt/sources.list.d/r-base-version.list - - apt update - # Install python packages used in the rest of the build - - echo "Installing python packages..." - - pip3 install --upgrade -r /opt/build/packages/packages-python-system.txt - - pip3 list --format columns - # Log system performance during the installation and record it each minute - - echo "Setting up glances..." - - nohup glances --stdout-csv now,mem.used,mem.total,cpu.idle --time 30 > /opt/monitoring/performance_log.csv & - # Suppress apt prompts and warning messages - - echo "Suppressing apt prompts..." - - export DEBIAN_FRONTEND=noninteractive - # Add an entry for PATH to default bashrc - - echo "Adding PATH setting to default bashrc" - - echo '# Set path' >> /etc/bash.bashrc - - echo "export PATH=$PATH" >> /etc/bash.bashrc - - echo '' >> /etc/bash.bashrc - - - echo ">=== $(date +%s) Installing additional deb/source packages ===<" - # Add snap to path - - echo "Adding snap to path..." - - PATH=$PATH:/snap/bin - # Microsoft ODBC tools - - echo "Installing Microsoft ODBC tools..." - - sleep 10 # wait for possible dpkg locks - - ACCEPT_EULA=Y apt-get install -y msodbcsql17 mssql-tools - - PATH=$PATH:/opt/mssql-tools/bin - - echo "export DOTNET_CLI_TELEMETRY_OPTOUT=1" >> /etc/bash.bashrc - - if [ "$(which sqlcmd)" = "" ]; then echo "Could not install Microsoft ODBC tools!"; exit 1; else echo "... successfully installed Microsoft ODBC tools"; fi - # Azure Data Studio - - echo "Installing Azure Data Studio..." - - /opt/build/download_and_install_deb.sh azuredatastudio - - if [ "$(which azuredatastudio)" = "" ]; then echo "Could not install Azure Data Studio!"; exit 1; else echo "... successfully installed Azure Data Studio"; fi - # Install drivers from maven - - echo "Installing DBeaver drivers..." - - DBEAVER_DRIVER_DIR="/usr/share/dbeaver-ce/drivers/maven/maven-central" - # Note that the filenames specified here have to be kept synchronised with the names in the dbeaver_drivers_config.xml file. - # Adding new drivers therefore involves changing both this file and the XML file. 
- - mkdir -p ${DBEAVER_DRIVER_DIR}/com.microsoft.sqlserver/ - - wget -nv https://repo1.maven.org/maven2/com/microsoft/sqlserver/mssql-jdbc/{{dbeaver.drivers.mssql_jdbc}}/mssql-jdbc-{{dbeaver.drivers.mssql_jdbc}}.jar -P ${DBEAVER_DRIVER_DIR}/com.microsoft.sqlserver/ - - wget -nv https://repo1.maven.org/maven2/com/microsoft/sqlserver/mssql-jdbc/{{dbeaver.drivers.mssql_jdbc}}/mssql-jdbc-{{dbeaver.drivers.mssql_jdbc}}.pom -P ${DBEAVER_DRIVER_DIR}/com.microsoft.sqlserver/ - - mkdir -p ${DBEAVER_DRIVER_DIR}/org.postgresql/ - - wget -nv https://repo1.maven.org/maven2/org/postgresql/pgjdbc-core-parent/{{dbeaver.drivers.pgjdbc}}/pgjdbc-core-parent-{{dbeaver.drivers.pgjdbc}}.pom -P ${DBEAVER_DRIVER_DIR}/org.postgresql/ - - wget -nv https://repo1.maven.org/maven2/org/postgresql/pgjdbc-versions/{{dbeaver.drivers.pgjdbc}}/pgjdbc-versions-{{dbeaver.drivers.pgjdbc}}.pom -P ${DBEAVER_DRIVER_DIR}/org.postgresql/ - - wget -nv https://repo1.maven.org/maven2/org/postgresql/postgresql/{{dbeaver.drivers.postgresql}}/postgresql-{{dbeaver.drivers.postgresql}}.jar -P ${DBEAVER_DRIVER_DIR}/org.postgresql/ - - wget -nv https://repo1.maven.org/maven2/org/postgresql/postgresql/{{dbeaver.drivers.postgresql}}/postgresql-{{dbeaver.drivers.postgresql}}.pom -P ${DBEAVER_DRIVER_DIR}/org.postgresql/ - - mkdir -p ${DBEAVER_DRIVER_DIR}/net.postgis/ - - wget -nv https://repo1.maven.org/maven2/net/postgis/postgis-jdbc/{{dbeaver.drivers.postgis_jdbc}}/postgis-jdbc-{{dbeaver.drivers.postgis_jdbc}}.jar -P ${DBEAVER_DRIVER_DIR}/net.postgis/ - - wget -nv https://repo1.maven.org/maven2/net/postgis/postgis-jdbc/{{dbeaver.drivers.postgis_jdbc}}/postgis-jdbc-{{dbeaver.drivers.postgis_jdbc}}.pom -P ${DBEAVER_DRIVER_DIR}/net.postgis/ - - wget -nv https://repo1.maven.org/maven2/net/postgis/postgis-geometry/{{dbeaver.drivers.postgis_geometry}}/postgis-geometry-{{dbeaver.drivers.postgis_geometry}}.jar -P ${DBEAVER_DRIVER_DIR}/net.postgis/ - - wget -nv https://repo1.maven.org/maven2/net/postgis/postgis-geometry/{{dbeaver.drivers.postgis_geometry}}/postgis-geometry-{{dbeaver.drivers.postgis_geometry}}.pom -P ${DBEAVER_DRIVER_DIR}/net.postgis/ - - mkdir -p ${DBEAVER_DRIVER_DIR}/com.github.waffle/ - - wget -nv https://repo1.maven.org/maven2/com/github/waffle/waffle-jna/{{dbeaver.drivers.waffle_jna}}/waffle-jna-{{dbeaver.drivers.waffle_jna}}.jar -P ${DBEAVER_DRIVER_DIR}/com.github.waffle/ - - wget -nv https://repo1.maven.org/maven2/com/github/waffle/waffle-jna/{{dbeaver.drivers.waffle_jna}}/waffle-jna-{{dbeaver.drivers.waffle_jna}}.pom -P ${DBEAVER_DRIVER_DIR}/com.github.waffle/ - - mv /opt/build/dbeaver_drivers_config.xml /usr/share/dbeaver-ce/drivers-config.xml - - echo "-Ddbeaver.drivers.configuration-file=/usr/share/dbeaver-ce/drivers-config.xml" >> /usr/share/dbeaver-ce/dbeaver.ini - - echo "-Djava.security.auth.login.config=/etc/jaas.conf" >> /usr/share/dbeaver-ce/dbeaver.ini - - ls -alh ${DBEAVER_DRIVER_DIR}/* - - echo "... successfully installed DBeaver drivers"; - # Install RStudio - - echo "Installing RStudio..." - - /opt/build/download_and_install_deb.sh rstudio - - if [ "$(which rstudio)" = "" ]; then echo "Could not install RStudio!"; exit 1; else echo "... successfully installed RStudio"; fi - # Install bats - - echo "Installing bats..." 
- - git clone https://github.com/bats-core/bats-core /opt/bats/bats-core - - git clone https://github.com/bats-core/bats-support /opt/bats/bats-support - - git clone https://github.com/bats-core/bats-assert /opt/bats/bats-assert - - git clone https://github.com/bats-core/bats-file /opt/bats/bats-file - - /opt/bats/bats-core/install.sh /usr/local - - if [ "$(which bats)" = "" ]; then echo "Could not install bats!"; exit 1; else echo "... successfully installed bats"; fi - # Install pyenv and pyenv-virtualenv - - echo "Installing pyenv..." - - export PYENV_ROOT="/opt/pyenv" - - rm -rf $PYENV_ROOT 2> /dev/null - - PATH="$PYENV_ROOT/bin:$PATH" # NB. pyenv needs to be at the beginning of the path so that it can override other python versions - - curl -s -S -L https://raw.githubusercontent.com/pyenv/pyenv-installer/master/bin/pyenv-installer | bash - - pyenv doctor || die "Could not configure pyenv!" - - if [ "$(which pyenv)" = "" ]; then echo "Could not install pyenv!"; exit 1; else echo "... successfully installed pyenv"; fi - # Get icons for webapps - - echo "Downloading icons..." - - mkdir -p /opt/icons - - wget https://raw.githubusercontent.com/hackmdio/codimd/develop/public/favicon.png -O /opt/icons/codimd.png || die "Could not find CodiMD icon!" - - wget https://about.gitlab.com/images/press/logo/png/gitlab-icon-rgb.png -O /opt/icons/gitlab.png || die "Could not find GitLab icon!" - - # Manually update ClamAV signature database - - echo ">=== $(date +%s) Updating ClamAV database ===<" - # We do not update ClamAV at build time since these definitions need to be updated at deployment times - - systemctl stop clamav-freshclam - - systemctl disable clamav-freshclam - # As clamav-unofficial-sigs has not been updated since 2014, we need the following fixes: - # Calls to SecuriteInfo fail with 'curl: (6) Could not resolve host: clamav.securiteinfo.com' - # - disable this source as suggested here: https://bugs.launchpad.net/ubuntu/+source/clamav-unofficial-sigs/+bug/1643172 - - sed -i '/si_dbs=/,/^"$/d' /usr/share/clamav-unofficial-sigs/conf.d/00-clamav-unofficial-sigs.conf - - sed -i '/si_update_hours/d' /usr/share/clamav-unofficial-sigs/conf.d/00-clamav-unofficial-sigs.conf - # Calls to MalwarePatrol fail with 'MalwarePatrol mbl.ndb database integrity tested BAD - SKIPPING' - # - disable this source as suggested here: https://www.mail-archive.com/pkg-clamav-devel@lists.alioth.debian.org/msg05014.html - - sed -i '/mbl_dbs=/,/^"$/d' /usr/share/clamav-unofficial-sigs/conf.d/00-clamav-unofficial-sigs.conf - # Update unofficial signatures (strangely this gives no output in the cloud-init logs) - - clamav-unofficial-sigs 2>&1 - - clamav-unofficial-sigs -i - - # Install Spark and Hadoop - approximately 10 minutes - - echo ">=== $(date +%s) Installing Spark/Hadoop ===<" - - shui install --latest --target /opt - - mv /opt/spark* /opt/spark - - PATH=$PATH:/opt/spark/bin - - if [ "$(which spark-shell)" = "" ]; then echo "Could not install Spark/Hadoop!"; exit 1; else echo "... 
successfully installed Spark/Hadoop"; fi - - # Install Julia and packages - approximately 20 minutes - - echo ">=== $(date +%s) Installing Julia and packages ===<" - - jill install stable --confirm --install_dir /opt/julia - - JULIA_BASE_DIR=$(ls -d /opt/julia/julia*) - - if [ "$(which julia)" = "" ]; then echo "Could not install Julia!"; exit 1; fi - - export JULIA_PACKAGES="[\"$(sed '/^$/d' /opt/build/packages/packages-julia.list | paste -s -d '|' | sed 's/|/", "/g')\"]" - # Create a global Julia depot for package installation - - export JULIA_DEPOT_PATH="${JULIA_BASE_DIR}/depot/" - - mkdir -p ${JULIA_DEPOT_PATH} - - sed -i "/DEPOT_PATH/d" ${JULIA_BASE_DIR}/etc/julia/startup.jl - - echo "push!(DEPOT_PATH, \"${JULIA_DEPOT_PATH}\")" >> ${JULIA_BASE_DIR}/etc/julia/startup.jl - # Set some Julia environment variables before installing/building the packages - - export JULIA_COPY_STACKS=1 - - export PYTHON="$(which python3)" # link Julia against system Python 3 - - julia -e "using Pkg; Pkg.add($JULIA_PACKAGES); for package in $JULIA_PACKAGES; Pkg.build(package); end" - # Ensure that Julia depot is globally readable - - chmod -R o=u,o-w ${JULIA_BASE_DIR}/depot/ - # Write Julia environment variables to global .bashrc - - echo "export JULIA_COPY_STACKS=${JULIA_COPY_STACKS}" >> /etc/bash.bashrc - - echo "export JULIA_DEPOT_PATH=~/.julia:${JULIA_DEPOT_PATH}" >> /etc/bash.bashrc # ensure that each user's DEPOT_PATH will be the usual default (~/.julia) followed by the global depot - - echo "export PYTHON=${PYTHON}" >> /etc/bash.bashrc - # Check for missing packages - - MISSING_JULIA_PACKAGES=$(julia -e "for package in $JULIA_PACKAGES; try; abspath(joinpath(dirname(Base.find_package(package)))); @eval using \$(Symbol(package)); catch e; println(package); end; end;") - - if [ "$MISSING_JULIA_PACKAGES" ]; then echo "Could not install Julia packages - $MISSING_JULIA_PACKAGES"; exit 1; else echo "... successfully installed Julia"; fi - # Add the Julia kernel to Jupyter - - mv /root/.local/share/jupyter ${JULIA_BASE_DIR}/share - - jupyter kernelspec install ${JULIA_BASE_DIR}/share/jupyter/kernels/julia-* || die "Could not install Julia kernel!" 
- - # Install python versions - - /opt/build/pyenv/build_requirements.py /opt/build/pyenv/packages-python.yaml - - | - for PYTHON_VERSION in $(ls /opt/build/python-*-requirements.txt | cut -d '-' -f 2 | sort -V); do - /opt/build/pyenv/install.sh $PYTHON_VERSION || die "Could not install Python ${PYTHON_VERSION}!"; - done - - # Configure pyenv: use a local PYENV_ROOT with the global versions symlinked into it - - echo ">=== $(date +%s) Configuring pyenv ===<" - - echo "Installed python versions:" - - du --si -d 1 ${PYENV_ROOT}/versions - - export PYENV_LOCAL_ROOT='${HOME}/.pyenv' - - echo "Configuring pyenv initialisation" - - echo '# Set up pyenv' >> /etc/bash.bashrc - - echo "export PYENV_ROOT=\"${PYENV_LOCAL_ROOT}\"" >> /etc/bash.bashrc - - echo "for version in $(ls ${PYENV_ROOT}/versions | xargs); do [ -d \"${PYENV_LOCAL_ROOT}/versions/\${version}\" ] || (mkdir -p ${PYENV_LOCAL_ROOT}/versions/\${version} && ln -s ${PYENV_ROOT}/versions/\${version}/* ${PYENV_LOCAL_ROOT}/versions/\${version}); done" >> /etc/bash.bashrc - # Setup pyenv executables - - echo 'eval "$(pyenv init --path)"' >> /etc/bash.bashrc - - echo 'eval "$(pyenv init - --no-rehash)"' >> /etc/bash.bashrc - - echo 'eval "$(pyenv virtualenv-init -)"' >> /etc/bash.bashrc - - echo "pyenv rehash" >> /etc/bash.bashrc - - echo "pyenv global $(ls ${PYENV_ROOT}/versions/ | sort -V | tail -n 2 | head -n 1)" >> /etc/bash.bashrc - # Create a centralised Python shim for use by Jupyter - - echo '#!/usr/bin/env bash' > /opt/pyenv/bin/python-for-jupyter - - echo 'exec "$(which python)" "$@"' >> /opt/pyenv/bin/python-for-jupyter - - chmod ugo+x /opt/pyenv/bin/python-for-jupyter - # Add a Jupyter kernel called 'python3' which will overwrite the native kernel - - python3 -m ipykernel install --name "python3" --display-name "Python 3 (active version)" - - | - KERNEL_PATH="/usr/local/share/jupyter/kernels/python3/kernel.json" - if [ -e "${KERNEL_PATH}" ]; then - python3 -c "import json; kernel = json.load(open('${KERNEL_PATH}', 'r')); kernel['argv'][0] = '/opt/pyenv/bin/python-for-jupyter'; json.dump(kernel, open('${KERNEL_PATH}', 'w'), indent=1)" - fi - - # Install any missing R packages - approximately 40 minutes - - echo ">=== $(date +%s) Installing R packages ===<" - - export HOME=/root # this is needed for the installation of the 'credentials' package - - echo '# Set up RStudio' >> /etc/bash.bashrc - - echo "export RSTUDIO_WHICH_R=/usr/bin/R" >> /etc/bash.bashrc # this ensures that all users will pick up system R when running RStudio - # Install CRAN packages and terminate if any are missing - - export CRAN_PACKAGES="\"$(sed '/^$/d' /opt/build/packages/packages-r-cran.list | paste -s -d '|' | sed 's/|/", "/g')\"" - - echo "Preparing to install $(echo $CRAN_PACKAGES | wc -w) CRAN packages\n${CRAN_PACKAGES}" - - Rscript -e "options('Ncpus' = parallel::detectCores()); requested_pkgs <- c($CRAN_PACKAGES); remaining_pkgs <- requested_pkgs[!(requested_pkgs %in% installed.packages()[,'Package'])]; if(length(remaining_pkgs)) { print(paste('Installing', paste(remaining_pkgs, collapse=', '))); install.packages(remaining_pkgs, quiet = TRUE) } else { print('No packages left to install') }" - - MISSING_CRAN_PACKAGES=$(Rscript -e "requested_pkgs <- c($CRAN_PACKAGES); missing_pkgs <- requested_pkgs[!(requested_pkgs %in% installed.packages()[,'Package'])]; print(missing_pkgs)" | sed "s/character(0)//") - - if [ "$MISSING_CRAN_PACKAGES" ]; then echo "Could not install CRAN packages\n${MISSING_CRAN_PACKAGES}"; exit 1; else echo "... 
successfully installed CRAN packages"; fi - # Install BioConductor packages and terminate if any are missing - - export BIOCONDUCTOR_PACKAGES="\"$(sed '/^$/d' /opt/build/packages/packages-r-bioconductor.list | paste -s -d '|' | sed 's/|/", "/g')\"" - - echo "Preparing to install $(echo $BIOCONDUCTOR_PACKAGES | wc -w) BioConductor packages\n${BIOCONDUCTOR_PACKAGES}" - - Rscript -e "options('Ncpus' = parallel::detectCores()); requested_pkgs <- c($BIOCONDUCTOR_PACKAGES); remaining_pkgs <- requested_pkgs[!(requested_pkgs %in% installed.packages()[,'Package'])]; if(length(remaining_pkgs)) { print(paste('Installing', paste(remaining_pkgs, collapse=', '))); BiocManager::install(remaining_pkgs, quiet = TRUE) } else { print('No packages left to install') }" - - MISSING_BIOCONDUCTOR_PACKAGES=$(Rscript -e "requested_pkgs <- c($BIOCONDUCTOR_PACKAGES); missing_pkgs <- requested_pkgs[!(requested_pkgs %in% installed.packages()[,'Package'])]; print(missing_pkgs)" | sed "s/character(0)//") - - if [ "$MISSING_BIOCONDUCTOR_PACKAGES" ]; then echo "Could not install Bioconductor packages\n${MISSING_BIOCONDUCTOR_PACKAGES}"; exit 1; else echo "... successfully installed BioConductor packages"; fi - # Add the R kernel to Jupyter - - R_VERSION=$(R --version | head -n 1 | cut -d ' ' -f3) - - ln -s /usr/local/lib/R/site-library/IRkernel/kernelspec /usr/local/lib/R/site-library/IRkernel/R${R_VERSION} - - | - sed -i "s|\"display_name\":.*\R.*|\"display_name\": \"R ${R_VERSION}\",|" /usr/local/lib/R/site-library/IRkernel/kernelspec/kernel.json - - jupyter kernelspec install /usr/local/lib/R/site-library/IRkernel/R${R_VERSION} || die "Could not install R kernel!"; - - # Clean up any build artifacts - - echo ">=== $(date +%s) Cleaning up the build environment ===<" - - USED_BYTES_OLD=$(df / | tail -n 1 | awk '{printf $3}') - # Remove netcat (potential security issue) [Note this will remove the 'ubuntu-minimal' metapackage but does not remove any other real packages] - # Remove xscreensaver (unnecessary) - - apt-get remove -y netcat-openbsd xscreensaver - # Fix any broken packages then clean and remove any unused packages - - apt-get --fix-broken install -y - - apt-get autoclean -y - - apt-get autoremove -y --purge - - apt-get clean - # Remove temporary files - - tmpreaper 10m /tmp/ /var/tmp/ # remove temporary files that have not been accessed in 10 minutes - - find /root/ -mindepth 1 -delete # remove all files from root's home directory including dotfiles - # Log space saved - - USED_BYTES_NEW=$(df / | tail -n 1 | awk '{printf $3}') - - echo "Successfully reclaimed $(numfmt --to=iec-i --suffix=B $(($USED_BYTES_OLD - $USED_BYTES_NEW))) of disk space" - - # Check for successful installations - - echo ">=== $(date +%s) Checking environment configuration ===<" - # Check that Jupyter kernels are correctly configured - - echo "Checking installed Jupyter kernels" - - jupyter kernelspec list - # Set PATH to the current working version which contains all installed packages - # Append ~/.local/bin and ~/bin so that any executables that are installed there (eg. 
by pip) can be used - # We do this at the end of the script so that - # - we know this is the PATH that worked when we checked for each package - # - we only get one entry in /etc/bash.bashrc rather than several with "last-one-wins" - - PATH="$PATH:\$HOME/.local/bin:\$HOME/bin" - - echo "Setting PATH to '${PATH}'" - - sed -i "s|^export PATH=.*|export PATH=${PATH}|" /etc/bash.bashrc - # Run installation tests - - python3 /opt/build/check_installation.py || die "Installation check failed!"; - -final_message: "System setup through cloud-init is finished. Configuration took $UPTIME seconds" - -# Shutdown at the end of the job to save on running costs -power_state: - mode: poweroff - message: "Shutting down after cloud-init is finished" - timeout: 30 - condition: true diff --git a/deployment/secure_research_desktop/cloud_init/resources/analyse_build.py b/deployment/secure_research_desktop/cloud_init/resources/analyse_build.py deleted file mode 100644 index d320e5f02d..0000000000 --- a/deployment/secure_research_desktop/cloud_init/resources/analyse_build.py +++ /dev/null @@ -1,158 +0,0 @@ -#! /usr/bin/env python3 -from contextlib import suppress -import csv -from datetime import datetime, timedelta -import glob -import json -import multiprocessing -import subprocess - - -def human_readable(timedelta_): - """Human readable string from timedelta""" - if not isinstance(timedelta_, timedelta): - return "" - seconds = int(timedelta_.total_seconds()) - days, seconds = divmod(seconds, 86400) - hours, seconds = divmod(seconds, 3600) - minutes, seconds = divmod(seconds, 60) - if days > 0: - return f"{days:d}d{hours:d}h{minutes:d}m{seconds:d}s" - if hours > 0: - return f"{hours:d}h{minutes:d}m{seconds:d}s" - if minutes > 0: - return f"{minutes:d}m{seconds:d}s" - return f"{seconds:d}s" - - -def log(timestamp, level, message): - print(f"[{timestamp.strftime(r'%Y-%m-%d %H:%M:%S')}: {level:<7}] {message}") - - -def mean(list_): - return sum(list_) / len(list_) - - -def main(): - """Process log files""" - # Load events from cloud init dump - # -------------------------------- - events = [] - bash_process = subprocess.run(["/bin/bash", "-c", "cloud-init analyze dump"], stdout=subprocess.PIPE, check=True) - cloud_init_log_events = json.loads(bash_process.stdout.decode("utf8")) - - # Use the time at which write_files runs as an indication of when cloud-init started - # This avoids possible clock-skew issues that occurred when trying to use earlier timestamps - with suppress(IndexError): - _ = list(filter(lambda x: x["event_type"] == "start" and x["name"] == "azure-ds/write_files", cloud_init_log_events))[0] - end_entries = list(filter(lambda x: x["event_type"] == "finish" and x["name"] == "azure-ds/write_files", cloud_init_log_events)) - if end_entries: - events.append({"timestamp": datetime.fromtimestamp(end_entries[0]["timestamp"]), "level": end_entries[0]["result"], "message": "Build started"}) - else: - events.append({"timestamp": datetime.now(), "level": "RUNNING", "message": "Build started"}) - - # Get initial cloud-init setup time - with suppress(IndexError): - end_entries = list(filter(lambda x: x["event_type"] == "finish" and x["name"] == "modules-config", cloud_init_log_events)) - if end_entries: - events.append({"timestamp": datetime.fromtimestamp(end_entries[0]["timestamp"]), "level": end_entries[0]["result"], "message": "Running cloud-init modules"}) - else: - events.append({"timestamp": datetime.now(), "level": "RUNNING", "message": "Running cloud-init modules"}) - - # Get package install/update 
time - with suppress(IndexError): - end_entries = list(filter(lambda x: x["event_type"] == "finish" and x["name"] == "modules-final/config-package_update_upgrade_install", cloud_init_log_events)) - if end_entries: - events.append({"timestamp": datetime.fromtimestamp(end_entries[0]["timestamp"]), "level": end_entries[0]["result"], "message": "Installing/updating Ubuntu packages"}) - else: - events.append({"timestamp": datetime.now(), "level": "RUNNING", "message": "Installing/updating Ubuntu packages"}) - - # Get total time - build_end_status = None - with suppress(IndexError): - entry = list(filter(lambda x: x["event_type"] == "finish" and x["name"] == "modules-final", cloud_init_log_events))[0] - events.append({"timestamp": datetime.fromtimestamp(entry["timestamp"]), "level": entry["result"], "message": "Finished build"}) - if entry["result"]: - build_end_status = (datetime.fromtimestamp(entry["timestamp"] - 1), entry["result"]) - - # Load events from runcmd echo statements - # --------------------------------------- - runcmd_log_events = [] - with suppress(subprocess.CalledProcessError): - bash_process = subprocess.run(["grep", ">===", "/var/log/cloud-init-output.log"], stdout=subprocess.PIPE, check=True) - for event in bash_process.stdout.decode("utf8").split("\n"): - with suppress(IndexError, ValueError): - start_time = event.split(" ")[1] - message = event.split(start_time)[1].replace("===<", "").strip() - runcmd_log_events.append({"start_time": int(start_time), "end_time": None, "message": message}) - for event, next_event in zip(runcmd_log_events[:-1], runcmd_log_events[1:]): - events.append({"timestamp": datetime.fromtimestamp(next_event["start_time"]), "level": "SUCCESS", "message": event["message"]}) - - # Add currently running task - # -------------------------- - if runcmd_log_events: - current_task = runcmd_log_events[-1]["message"] - if build_end_status: - events.append({"timestamp": build_end_status[0], "level": build_end_status[1], "message": current_task}) - else: - events.append({"timestamp": datetime.now(), "level": "RUNNING", "message": current_task}) - - # Log all events - # -------------- - events.sort(key=lambda x: x["timestamp"]) - previous_event_time = None - for event in events: - time_elapsed = "" - if previous_event_time: - if event["message"] == "Finished build": - time_elapsed = event["timestamp"] - events[0]["timestamp"] - else: - time_elapsed = event["timestamp"] - previous_event_time - log(event["timestamp"], event["level"], f"{event['message']}: {human_readable(time_elapsed)}") - previous_event_time = event["timestamp"] - - # Check system performance - # ------------------------ - n_cores = multiprocessing.cpu_count() - mem_usage, cpu_usage, mem_gb = [], [], 0 - with suppress(FileNotFoundError): - with open("/opt/monitoring/performance_log.csv", "r") as system_log: - for row in csv.DictReader(system_log): - if build_end_status: - timestamp = datetime.strptime(row["now"], r"%Y-%m-%d %H:%M:%S %Z") - if timestamp > build_end_status[0]: - break - mem_usage.append(100 * float(row["mem.used"]) / float(row["mem.total"])) - cpu_usage.append(100 - float(row["cpu.idle"])) - mem_gb = max(mem_gb, float(row["mem.total"]) / (1000 * 1000 * 1000)) - - timestamp = build_end_status[0] if build_end_status else datetime.now() - with suppress(ZeroDivisionError): - # Memory - log(timestamp, "INFO", f"Memory available: {int(round(mem_gb)):d} GB") - mem_mean, mem_min, mem_max = mean(mem_usage), min(mem_usage), max(mem_usage) - log(timestamp, "INFO", f"..... 
mean usage: {mem_mean:>6.2f}% => {(mem_gb * mem_mean / 100):>4.1f} GB") - log(timestamp, "INFO", f"...... min usage: {mem_min:>6.2f}% => {mem_gb * mem_min / 100:>4.1f} GB") - log(timestamp, "INFO", f"...... max usage: {mem_max:>6.2f}% => {mem_gb * mem_max / 100:>4.1f} GB") - # CPU - log(timestamp, "INFO", f"CPU available: {int(n_cores):d} cores") - cpu_mean, cpu_min, cpu_max = mean(cpu_usage), min(cpu_usage), max(cpu_usage) - log(timestamp, "INFO", f"..... mean usage: {cpu_mean:>6.2f}% => {(n_cores * cpu_mean / 100):>4.1f} cores") - log(timestamp, "INFO", f"...... min usage: {cpu_min:>6.2f}% => {(n_cores * cpu_min / 100):>4.1f} cores") - log(timestamp, "INFO", f"...... max usage: {cpu_max:>6.2f}% => {(n_cores * cpu_max / 100):>4.1f} cores") - - # Check python installations - # -------------------------- - with suppress(FileNotFoundError): - for fname in glob.glob("/opt/monitoring/python-*-safety-check.json"): - with open(fname, "r") as f_safety_check: - output = json.load(f_safety_check) - if output["affected_packages"]: - python_version = fname.split("-")[1] - log(timestamp, "WARNING", f"Safety check found problems with Python {python_version}") - for package in output["vulnerabilities"]: - log(timestamp, "WARNING", f"... {package['package_name']} [{package['analyzed_version']}] is affected by issue {package['vulnerability_id']} (for versions {package['all_vulnerable_specs']})") - - -if __name__ == "__main__": - main() diff --git a/deployment/secure_research_desktop/cloud_init/resources/check_installation.py b/deployment/secure_research_desktop/cloud_init/resources/check_installation.py deleted file mode 100644 index f1460584d8..0000000000 --- a/deployment/secure_research_desktop/cloud_init/resources/check_installation.py +++ /dev/null @@ -1,114 +0,0 @@ -#! /usr/bin/env python3 -import os -import glob -import subprocess -import sys - - -def run(command): - return subprocess.run( - command, - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - encoding="utf8", - ).stdout.strip() - - -def run_tests(success, failure, *tests): - for test in tests: - executable = test[0] - if executable == "python": - python_version = test[1] - exists = run(f"ls /opt/pyenv/versions/{python_version}/bin/python") - version = run(f"/opt/pyenv/versions/{python_version}/bin/python --version | cut -d ' ' -f 2") - executable = f"Python {'.'.join(python_version.split('.')[:2])}" - elif executable == "pip": - python_version = test[1] - exists = run(f"ls /opt/pyenv/versions/{python_version}/bin/pip") - version = run(f"/opt/pyenv/versions/{python_version}/bin/pip -V | cut -d ' ' -f 2") - executable = f"pip (Python {'.'.join(python_version.split('.')[:2])})" - else: - exists = run(f"which {executable}") - version = run(test[1]) if exists else None - if version: - print(f"... {executable} [{exists}] {version}") - success += 1 - else: - print(f"... 
ERROR {executable} not found!") - failure += 1 - return (success, failure) - - -# Run all tests -success, failure = 0, 0 -python_versions = [("python", os.path.split(path)[1]) for path in glob.glob("/opt/pyenv/versions/*")] -pip_versions = [("pip", os.path.split(path)[1]) for path in glob.glob("/opt/pyenv/versions/*")] - -print("Programming languages:") -(success, failure) = run_tests( - success, - failure, - ("cmake", "cmake --version 2>&1 | head -n 1 | awk '{print $3}'"), - ("g++", "g++ --version | grep g++ | awk '{print $NF}'"), - ("gcc", "gcc --version | grep gcc | awk '{print $NF}'"), - ("gfortran", "gfortran --version | grep Fortran | awk '{print $NF}'"), - ("java", "java -version 2>&1 | grep 'openjdk version' | cut -d '\"' -f 2"), - ("julia", "julia --version | awk '{print $NF}'"), - *python_versions, - ("R", "R --version | grep 'R version' | awk '{print $3}'"), - ("rustc", "rustc --version 2>&1 | awk '{print $2}'"), - ("scala", "scalac -version 2>&1 | awk '{print $4}'"), - ("spark-shell", "spark-shell --version 2>&1 | grep version | grep -v Scala | awk '{print $NF}'"), -) - -print("Package managers:") -(success, failure) = run_tests( - success, - failure, - ("cargo", "cargo -V"), - *pip_versions, -) - -print("Editors/IDEs:") -(success, failure) = run_tests( - success, - failure, - ("code", "code -v --user-data-dir /tmp 2>/dev/null | head -n 1"), - ("emacs", "emacs --version | head -n 1 | awk '{print $NF}'"), - ("nano", "nano --version | head -n 1 | awk '{print $NF}'"), - ("pycharm-community", "snap list pycharm-community | tail -n 1 | awk '{print $2}'"), - ("rstudio", "dpkg -s rstudio | grep '^Version:' | awk '{print $NF}'"), - ("vim", "dpkg -s vim | grep '^Version:' | cut -d ':' -f 3"), -) - -print("Presentation tools:") -(success, failure) = run_tests( - success, - failure, - ("latex", "latex --version | grep 'TeX Live' | awk '{print $2}'"), - ("libreoffice", "libreoffice --version | head -n 1 | awk '{print $2}'"), - ("xelatex", "xelatex --version | grep 'TeX Live' | awk '{print $2}'"), -) - -print("Development tools:") -(success, failure) = run_tests( - success, - failure, - ("azuredatastudio", "dpkg -s azuredatastudio | grep '^Version:' | awk '{print $NF}'"), - ("bash", "bash --version | head -n 1 | awk '{print $4}'"), - ("dbeaver-ce", "dpkg -s dbeaver-ce | grep '^Version:' | awk '{print $NF}'"), - ("firefox", "firefox --version | awk '{print $NF}'"), - ("git", "git --version | awk '{print $NF}'"), - ("htop", "htop --version | head -n 1 | awk '{print $2}'"), - ("nvidia-smi", "modinfo nvidia | grep '^version:' | awk '{print $NF}'"), - ("psql", "psql --version | awk '{print $NF}' | sed 's/)//'"), - ("sqlcmd", "sqlcmd -? 
| grep Version | awk '{print $2}'"), - ("weka", "weka -c weka.core.Version 2> /dev/null | head -n 1"), -) - -# Return appropriate code -print(f"{success + failure} test(s), {failure} failure(s)") -if failure > 0: - sys.exit(os.EX_SOFTWARE) -sys.exit(os.EX_OK) diff --git a/deployment/secure_research_desktop/cloud_init/resources/dbeaver_drivers_config.mustache.xml b/deployment/secure_research_desktop/cloud_init/resources/dbeaver_drivers_config.mustache.xml deleted file mode 100644 index 7ce425aea4..0000000000 --- a/deployment/secure_research_desktop/cloud_init/resources/dbeaver_drivers_config.mustache.xml +++ /dev/null @@ -1,26 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/deployment/secure_research_desktop/cloud_init/resources/deprovision_vm.sh b/deployment/secure_research_desktop/cloud_init/resources/deprovision_vm.sh deleted file mode 100644 index 76afa0f351..0000000000 --- a/deployment/secure_research_desktop/cloud_init/resources/deprovision_vm.sh +++ /dev/null @@ -1,28 +0,0 @@ -#! /bin/bash - -# Clean up temporary files -rm -rf /root/* /root/.[a-zA-Z_]* /tmp/* /tmp/.[a-zA-Z_]* - -# Disconnect omsagent from default workspace if it exists -if [ -d "/opt/microsoft/omsagent/" ]; then - # Disconnecting omsagent from default workspace - echo "Connected workspaces:" - /opt/microsoft/omsagent/bin/omsadmin.sh -l - echo "Disconnecting omsagent from connected workspace:" - /opt/microsoft/omsagent/bin/omsadmin.sh -X -else - echo "omsagent not found, continuing..." -fi - -# Deprovision this VM -echo -e "\n$(date -u --iso-8601=seconds): Calling deprovisioner on this VM" -waagent -deprovision+user -force 2>&1 - -# Fix internet connectivity that is broken by waagent deprovisioning (needed in older Ubuntu versions) -echo -e "\n$(date -u --iso-8601=seconds): Fixing internet connectivity" -if [ ! -e /etc/resolv.conf ]; then ln -s /run/systemd/resolve/stub-resolv.conf /etc/resolv.conf; fi - -# Remove execute permissions from this file -echo -e "\n$(date -u --iso-8601=seconds): Removing execute permissions from this script" -chmod ugo-x /opt/build/deprovision_vm.sh -ls -alh /opt/build/deprovision_vm.sh \ No newline at end of file diff --git a/deployment/secure_research_desktop/cloud_init/resources/download_and_install_deb.sh b/deployment/secure_research_desktop/cloud_init/resources/download_and_install_deb.sh deleted file mode 100644 index 6d082dd448..0000000000 --- a/deployment/secure_research_desktop/cloud_init/resources/download_and_install_deb.sh +++ /dev/null @@ -1,50 +0,0 @@ -#! /bin/bash - -# Requires one argument: config file identifier -if [ $# -ne 1 ]; then - echo "FATAL: Incorrect number of arguments" - exit 1 -fi -PACKAGE_NAME=$1 - -# Ensure that the config file exists -CONFIG_FILE="/opt/build/${PACKAGE_NAME}.debinfo" -if [ ! -e $CONFIG_FILE ]; then - echo "FATAL: Config file could not be loaded from $CONFIG_FILE" - exit 2 -fi - -# Parse the config file -PACKAGE_HASH=$(grep "hash:" $CONFIG_FILE | cut -d':' -f2-99 | sed 's|^ ||') -PACKAGE_VERSION=$(grep "version:" $CONFIG_FILE | cut -d':' -f2-99 | sed 's|^ ||') -PACKAGE_DEBFILE=$(grep "debfile:" $CONFIG_FILE | cut -d':' -f2-99 | sed 's|^ ||' | sed "s/|VERSION|/$PACKAGE_VERSION/") -PACKAGE_REMOTE=$(grep "remote:" $CONFIG_FILE | cut -d':' -f2-99 | sed 's|^ ||' | sed "s/|VERSION|/$PACKAGE_VERSION/" | sed "s/|DEBFILE|/$PACKAGE_DEBFILE/") - -# Ensure that all required variables have been set -if [ ! "$PACKAGE_DEBFILE" ]; then exit 3; fi -if [ ! "$PACKAGE_HASH" ]; then exit 3; fi -if [ ! 
"$PACKAGE_NAME" ]; then exit 3; fi -if [ ! "$PACKAGE_REMOTE" ]; then exit 3; fi - -# Download and verify the .deb file -echo "Downloading and verifying deb file..." -wget -nv $PACKAGE_REMOTE -P /opt/build/ -ls -alh /opt/build/${PACKAGE_DEBFILE} -echo "$PACKAGE_HASH /opt/build/${PACKAGE_DEBFILE}" > /tmp/${PACKAGE_NAME}_sha256.hash -if [ "$(sha256sum -c /tmp/${PACKAGE_NAME}_sha256.hash | grep FAILED)" != "" ]; then - echo "FATAL: Checksum did not match expected for $PACKAGE_NAME" - exit 4 -fi - -# Wait until the package repository is not in use -while true; do - apt-get check >/dev/null 2>&1 - if [ "$?" -eq "0" ]; then break; fi - echo "Waiting for another installation process to finish..." - sleep 1 -done - -# Install and cleanup -echo "Installing deb file: /opt/build/${PACKAGE_DEBFILE}" -gdebi --non-interactive /opt/build/${PACKAGE_DEBFILE} -rm /opt/build/${PACKAGE_DEBFILE} diff --git a/deployment/secure_research_desktop/cloud_init/resources/jaas.conf b/deployment/secure_research_desktop/cloud_init/resources/jaas.conf deleted file mode 100644 index 01d2712b2f..0000000000 --- a/deployment/secure_research_desktop/cloud_init/resources/jaas.conf +++ /dev/null @@ -1,7 +0,0 @@ -pgjdbc { -com.sun.security.auth.module.Krb5LoginModule required - useTicketCache=true - debug=true - renewTGT=true - doNotPrompt=true; -}; \ No newline at end of file diff --git a/deployment/secure_research_desktop/cloud_init/resources/pycharm.desktop b/deployment/secure_research_desktop/cloud_init/resources/pycharm.desktop deleted file mode 100644 index 3c1d953b52..0000000000 --- a/deployment/secure_research_desktop/cloud_init/resources/pycharm.desktop +++ /dev/null @@ -1,7 +0,0 @@ -[Desktop Entry] -Version=1.0 -Type=Application -Name=PyCharm -Exec=/snap/bin/pycharm-community -Icon=/snap/pycharm-community/current/meta/gui/icon.png -Categories=Utility;TextEditor;Development;IDE; \ No newline at end of file diff --git a/deployment/secure_research_desktop/cloud_init/resources/pyenv_build_requirements.py b/deployment/secure_research_desktop/cloud_init/resources/pyenv_build_requirements.py deleted file mode 100644 index edd2bad708..0000000000 --- a/deployment/secure_research_desktop/cloud_init/resources/pyenv_build_requirements.py +++ /dev/null @@ -1,74 +0,0 @@ -#! 
/usr/bin/env python3 -import os -import subprocess -import sys -import yaml -from packaging.version import InvalidVersion, Version - - -def read_yaml(yaml_file): - with open(yaml_file, "r") as f_yaml: - requirements = yaml.safe_load(f_yaml) - - for version in requirements["versions"]: - package_list = [] - for package_name, details in requirements["packages"].items(): - suffix = None - if details: - constraints = sum([details.get(v, []) for v in ("all", version)], []) - if "uninstallable" in constraints: - continue - suffix = ",".join(constraints) - package_list.append((package_name, suffix)) - yield (version, package_list) - - -def to_version(input_str): - try: - return Version(input_str) - except InvalidVersion: - return None - - -if __name__ == "__main__": - if len(sys.argv) != 2: - print("Path to a YAML file is a required input") - sys.exit(1) - - # Get list of available Python versions - available_versions = ( - subprocess.run( - ["pyenv", "install", "--list"], - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - env=os.environ.copy(), - encoding="utf8", - ) - .stdout.strip() - .split("\n") - ) - available_versions = [ - v for v in map(to_version, available_versions) if v and not v.is_prerelease - ] - - # Find the most up-to-date Python version and write out a list of packages - python_versions = [] - for base_version, package_list in read_yaml(sys.argv[1]): - python_version = sorted( - [v for v in available_versions if f"{v.major}.{v.minor}" == base_version] - )[-1] - python_versions.append(python_version) - - # Write list of package names - file_path = f"/opt/build/packages/packages-python-{python_version}.list" - with open(file_path, "w") as f_list: - f_list.writelines(f"{pkg[0]}\n" for pkg in package_list) - - # Write requirements file - file_path = f"/opt/build/python-{python_version}-requirements.txt" - with open(file_path, "w") as f_requirements: - for (name, constraint) in package_list: - if constraint: - f_requirements.write(f"{name}{constraint}\n") - else: - f_requirements.write(f"{name}\n") diff --git a/deployment/secure_research_desktop/cloud_init/resources/pyenv_install.sh b/deployment/secure_research_desktop/cloud_init/resources/pyenv_install.sh deleted file mode 100644 index 56898d3915..0000000000 --- a/deployment/secure_research_desktop/cloud_init/resources/pyenv_install.sh +++ /dev/null @@ -1,96 +0,0 @@ -#! 
/bin/bash -# shellcheck disable=SC2046,SC2086 - -# Ensure environment name is provided -# ----------------------------------- -if [ $# -ne 1 ]; then - echo "FATAL: Incorrect number of arguments" - exit 1 -fi -PYTHON_VERSION=$1 -PYENV_ROOT="$(pyenv root)" -MONITORING_LOG="/opt/monitoring/python-${PYTHON_VERSION}-package-versions.log" -REQUIREMENTS_TXT="/opt/build/python-${PYTHON_VERSION}-requirements.txt" -REQUESTED_PACKAGE_LIST="/opt/build/packages/packages-python-${PYTHON_VERSION}.list" -SAFETY_CHECK_JSON="/opt/monitoring/python-${PYTHON_VERSION}-safety-check.json" - - -# Ensure that pyenv is active and determine which Python version to use -# --------------------------------------------------------------------- -echo ">=== $(date +%s) Installing Python ($PYTHON_VERSION) and packages ===<" -PYTHON_CONFIGURE_OPTS="--enable-shared" pyenv install --skip-existing "$PYTHON_VERSION" -EXE_PATH="${PYENV_ROOT}/versions/${PYTHON_VERSION}/bin" -echo "Installed $(${EXE_PATH}/python --version)" - - -# Install and upgrade installation prerequisites -# ---------------------------------------------- -echo "Installing and upgrading installation prerequisites for Python ${PYTHON_VERSION}..." -${EXE_PATH}/pip install --upgrade pip pip-tools setuptools - - -# Solve dependencies and write package versions to monitoring log -# --------------------------------------------------------------- -echo "Determining package versions with pip-compile..." -${EXE_PATH}/pip-compile -o "$MONITORING_LOG" "$REQUIREMENTS_TXT" - - -# Install pinned packages using pip -# --------------------------------- -echo "Installing packages with pip..." -${EXE_PATH}/pip install -r "$MONITORING_LOG" - - -# Run any post-install commands -# ----------------------------- -echo "Running post-install commands..." -INSTALLED_PACKAGES=$(${EXE_PATH}/pip list --format columns | tail -n+3 | awk '{print $1}' | tr '[:upper:]' '[:lower:]') -for INSTALLED_PACKAGE in $INSTALLED_PACKAGES; do - if [ "$INSTALLED_PACKAGE" == "gensim" ]; then - export GENSIM_DATA_DIR=/usr/share/gensim_data - for dataset in "text8" "fake-news"; do - ${EXE_PATH}/python -m gensim.downloader --download $dataset - done - sleep 30 - fi - if [ "$INSTALLED_PACKAGE" == "nltk" ]; then - ${EXE_PATH}/python -m nltk.downloader all -d /usr/share/nltk_data - fi - if [ "$INSTALLED_PACKAGE" == "spacy" ]; then - ${EXE_PATH}/python -m spacy download en_core_web_sm - ${EXE_PATH}/python -m spacy download en_core_web_md - ${EXE_PATH}/python -m spacy download en_core_web_lg - fi -done - - -# Check that all requested packages are installed -# ----------------------------------------------- -MISSING_PACKAGES="" -while read -r REQUESTED_PACKAGE; do - REQUESTED_PACKAGE_LOWER=$(echo $REQUESTED_PACKAGE | tr '[:upper:]' '[:lower:]') - for INSTALLED_PACKAGE in $INSTALLED_PACKAGES; do - if [ "$REQUESTED_PACKAGE_LOWER" == "$INSTALLED_PACKAGE" ]; then break; fi - done - if [ "$REQUESTED_PACKAGE_LOWER" != "$INSTALLED_PACKAGE" ]; then - MISSING_PACKAGES="$MISSING_PACKAGES $REQUESTED_PACKAGE" - fi -done < "$REQUESTED_PACKAGE_LIST" -if [ "$MISSING_PACKAGES" ]; then - echo "FATAL: The following requested packages are missing:" - echo "$MISSING_PACKAGES" - exit 1 -else - echo "All requested Python ${PYTHON_VERSION} packages are installed" -fi - - -# Run safety check and log any problems -# ------------------------------------- -echo "Running safety check on Python ${PYTHON_VERSION} installation..." 
-${EXE_PATH}/safety check --save-json $SAFETY_CHECK_JSON - - -# Clean up -# -------- -rm -rf "/root/.pyenv" diff --git a/deployment/secure_research_desktop/network_rules/vmimages-nsg-rules-build-candidates.json b/deployment/secure_research_desktop/network_rules/vmimages-nsg-rules-build-candidates.json deleted file mode 100644 index 25b994ed13..0000000000 --- a/deployment/secure_research_desktop/network_rules/vmimages-nsg-rules-build-candidates.json +++ /dev/null @@ -1,66 +0,0 @@ -[ - { - "name": "DenyAdminVPNInbound", - "access": "Deny", - "description": "Deny connections from admin P2S VPN", - "destinationAddressPrefix": "{{srdImage.build.subnet.cidr}}", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 2000, - "protocol": "*", - "sourceAddressPrefix": "{{network.vpn.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowAdminApprovedSshInbound", - "access": "Allow", - "description": "Allow inbound SSH connections from authorised image building IP addresses", - "destinationAddressPrefix": "{{srdImage.build.subnet.cidr}}", - "destinationPortRange": "22", - "direction": "Inbound", - "priority": 2100, - "protocol": "TCP", - "sourceAddressPrefix": [ - {{#buildAdminIpAddresses}} - "{{.}}", - {{/buildAdminIpAddresses}} - ], - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherInbound", - "access": "Deny", - "description": "Deny all other inbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalInternetOutbound", - "access": "Allow", - "description": "Allow outbound connections to internet", - "destinationAddressPrefix": "Internet", - "destinationPortRange": "*", - "direction": "Outbound", - "priority": 3900, - "protocol": "*", - "sourceAddressPrefix": "{{srdImage.build.subnet.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherOutbound", - "access": "Deny", - "description": "Deny all other outbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Outbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - } -] diff --git a/deployment/secure_research_desktop/packages/dbeaver-driver-versions.json b/deployment/secure_research_desktop/packages/dbeaver-driver-versions.json deleted file mode 100644 index d78e3652d8..0000000000 --- a/deployment/secure_research_desktop/packages/dbeaver-driver-versions.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "mssql_jdbc": "12.6.1.jre8", - "pgjdbc": "1.1.6", - "postgis_geometry": "2023.1.0", - "postgis_jdbc": "2023.1.0", - "postgresql": "42.7.3", - "waffle_jna": "3.3.0" -} \ No newline at end of file diff --git a/deployment/secure_research_desktop/packages/deb-azuredatastudio.version b/deployment/secure_research_desktop/packages/deb-azuredatastudio.version deleted file mode 100644 index 8fe0e63d27..0000000000 --- a/deployment/secure_research_desktop/packages/deb-azuredatastudio.version +++ /dev/null @@ -1,4 +0,0 @@ -hash: f4f612b03d30675016132bf399f3f03d44f018478716f3b1517378f0eced00dc -version: 1.48.0 -debfile: azuredatastudio-linux-|VERSION|.deb -remote: https://sqlopsbuilds.azureedge.net/stable/4970733324ef8254b7c22a5dc55af7f8a1dea93f/|DEBFILE| diff --git a/deployment/secure_research_desktop/packages/deb-rstudio-focal.version b/deployment/secure_research_desktop/packages/deb-rstudio-focal.version deleted file mode 100644 index 3fc09f630f..0000000000 --- 
a/deployment/secure_research_desktop/packages/deb-rstudio-focal.version +++ /dev/null @@ -1,4 +0,0 @@ -hash: 81f221be29eb39efe65be48490c3cd090ddaf8991ea7a089baa0cdd326109e02 -version: 2023.12.1-402 -debfile: rstudio-|VERSION|-amd64.deb -remote: https://download1.rstudio.org/electron/focal/amd64/|DEBFILE| diff --git a/deployment/secure_research_desktop/packages/deb-rstudio-jammy.version b/deployment/secure_research_desktop/packages/deb-rstudio-jammy.version deleted file mode 100644 index 9449291678..0000000000 --- a/deployment/secure_research_desktop/packages/deb-rstudio-jammy.version +++ /dev/null @@ -1,4 +0,0 @@ -hash: 75542cc24c59404f8d62815bc0e31b43032b5032e651fa9f618dbcdca8aa7cac -version: 2023.12.1-402 -debfile: rstudio-|VERSION|-amd64.deb -remote: https://download1.rstudio.org/electron/jammy/amd64/|DEBFILE| diff --git a/deployment/secure_research_desktop/packages/packages-apt.list b/deployment/secure_research_desktop/packages/packages-apt.list deleted file mode 100644 index 878a8a5b78..0000000000 --- a/deployment/secure_research_desktop/packages/packages-apt.list +++ /dev/null @@ -1,110 +0,0 @@ -adcli -apt-transport-https -aspell -auditd -automake -autotools-dev -bison -blobfuse -build-essential -cargo -clamav -clamav-base -clamav-daemon -clamav-freshclam -clamav-unofficial-sigs -cmake -code -cuda -curl -dbeaver-ce -default-jdk -emacs -firefox -flex -g++ -gcc -gdebi-core -gfortran -git -grass -imagemagick -jags -jupyter -jupyter-nbconvert -jupyter-notebook -krb5-user -ldap-utils -libboost-all-dev -libbz2-dev -libcudnn8 -libcudnn8-dev -libcurl4-openssl-dev -libeccodes-dev -libffi-dev -libfftw3-dev -libfribidi-dev -libgdal-dev -libglpk-dev -libgmp-dev -libgsl-dev -libgtk2.0-dev -libharfbuzz-dev -liblapack-dev -liblzma-dev -libmagick++-dev -libmpfr-dev -libncurses5-dev -libncursesw5-dev -libnode-dev -libopenblas-dev -libpoppler-cpp-dev -libpq-dev -libreadline-dev -libreoffice -libsasl2-dev -libsodium-dev -libsqlite3-dev -libssl-dev -libtool -libudunits2-dev -libxml2-dev -libzmq3-dev -llvm -lyx -m4 -nfs-common -octave -onboard -p7zip -parallel -postgresql-client -python3 -python3-openssl -python3-pip -qgis -r-base -r-base-core -r-base-dev -realmd -ristretto -rustc -scala -sssd -sssd-tools -sysstat -texlive-full -texstudio -tk-dev -tmpreaper -unixodbc-dev -unzip -weka -weka-doc -wget -xfce4 -xfce4-terminal -xrdp -xz-utils -zlib1g-dev -zsh diff --git a/deployment/secure_research_desktop/packages/packages-julia.list b/deployment/secure_research_desktop/packages/packages-julia.list deleted file mode 100644 index aefb2bd674..0000000000 --- a/deployment/secure_research_desktop/packages/packages-julia.list +++ /dev/null @@ -1,42 +0,0 @@ -ArgCheck -ArgParse -BenchmarkTools -Conda -CSV -DataFrames -DataStructures -Dates -Debugger -DecisionTree -DifferentialEquations -Distributions -DSP -Formatting -GLM -GR -HypothesisTests -IJulia -Images -Interact -JDBC -JLD2 -JSON -KernelDensity -LaTeXStrings -LightGraphs -LightXML -MLBase -NearestNeighbors -Nettle -Optim -Parameters -Plots -ProtoBuf -PyCall -PyPlot -Roots -ScikitLearn -StaticArrays -StatsBase -StatsFuns -Zygote diff --git a/deployment/secure_research_desktop/packages/packages-python-system.txt b/deployment/secure_research_desktop/packages/packages-python-system.txt deleted file mode 100644 index cd12870e8b..0000000000 --- a/deployment/secure_research_desktop/packages/packages-python-system.txt +++ /dev/null @@ -1,7 +0,0 @@ -glances # for system monitoring -jill # for installing Julia -matplotlib # for PyPlot Julia package -packaging # for 
pyenv installation -PyYAML # for pyenv installation -requests # for pyenv installation -shui # for installing Spark/Hadoop diff --git a/deployment/secure_research_desktop/packages/packages-python.yaml b/deployment/secure_research_desktop/packages/packages-python.yaml deleted file mode 100644 index 3c9ab9a948..0000000000 --- a/deployment/secure_research_desktop/packages/packages-python.yaml +++ /dev/null @@ -1,82 +0,0 @@ -# Structure of this file is as follows: -# versions -# - list of Python versions to install -# packages: -# - list of package to install in each Python version -# [Optional] arguments to each package -# : -versions: - - "3.10" - - "3.11" - - "3.12" -packages: - arviz: - beautifulsoup4: - black: - Cython: - dash: - dask: - flair: - flake8: - folium: - gensim: - geopandas: - gym: - html5lib: - ipykernel: - keras: - "3.12": ["uninstallable"] # requires tensorflow - lightgbm: - "all": ["[dask]"] - lxml: - matplotlib: - nltk: - numba: - numpy: - "all": [">1.0.0"] # increase solver flexibility - pandas: - pandasql: - pathos: - pg8000: - Pillow: - pip-tools: - plotly: - prophet: - psycopg2: - pydot: - pygrib: - pylint: - pymc: - "3.12": ["uninstallable"] # Dependency conflict with other (unidentified) package - pyodbc: - pyproj: - pyshp: - pystan: - "3.12": ["uninstallable"] # Does not currently support 3.12 due to dependency issue - pytest: - PyYAML: - regex: - requests: - safety: # also used by installation scripts - scikit-image: - "all": [">0.10.0"] # increase solver flexibility - scikit-learn: - scipy: - seaborn: - spacy-langdetect: - spacy: - Sphinx: - SQLAlchemy: - statsmodels: - "all": [">0.10.0"] # increase solver flexibility - sympy: - tables: - tensorflow: - thinc: - torch: - torchvision: - "3.12": ["uninstallable"] # does not yet support 3.12 - tsfresh: - wordcloud: - xgboost: - xlrd: diff --git a/deployment/secure_research_desktop/packages/packages-r-bioconductor.list b/deployment/secure_research_desktop/packages/packages-r-bioconductor.list deleted file mode 100644 index 16e4616151..0000000000 --- a/deployment/secure_research_desktop/packages/packages-r-bioconductor.list +++ /dev/null @@ -1,26 +0,0 @@ -annotate -AnnotationDbi -Biobase -BiocGenerics -BiocParallel -BiocVersion -biomaRt -Biostrings -DelayedArray -genefilter -GenomeInfoDb -GenomicRanges -graph -Icens -impute -IRanges -limma -mixOmics -phyloseq -RBGL -Rsamtools -S4Vectors -SummarizedExperiment -sva -XVector -zlibbioc diff --git a/deployment/secure_research_desktop/packages/packages-r-cran.list b/deployment/secure_research_desktop/packages/packages-r-cran.list deleted file mode 100644 index 7cf7e374a6..0000000000 --- a/deployment/secure_research_desktop/packages/packages-r-cran.list +++ /dev/null @@ -1,44 +0,0 @@ -arrow -BiocManager -caret -csv -data.table -dataCompareR -DataExplorer -DBI -devtools -esquisse -ggforce -ggvis -glmnet -IRkernel -janitor -knitr -mlr3 -MSSQL -odbc -parsnip -plotly -plumber -profvis -prophet -quanteda -ranger -RMariaDB -RMySQL -RPostgres -RSQLite -rstan -shiny -sqldf -testthat -text2vec -tidytext -tidyverse -tidyxl -tsbox -validate -vroom -xgboost -XML -yaml \ No newline at end of file diff --git a/deployment/secure_research_desktop/packages/packages-snap.list b/deployment/secure_research_desktop/packages/packages-snap.list deleted file mode 100644 index c3b2de1c6f..0000000000 --- a/deployment/secure_research_desktop/packages/packages-snap.list +++ /dev/null @@ -1 +0,0 @@ -pycharm-community --classic \ No newline at end of file diff --git 
a/deployment/secure_research_desktop/setup/Convert_VM_To_Image.ps1 b/deployment/secure_research_desktop/setup/Convert_VM_To_Image.ps1 deleted file mode 100644 index ecf146e1a0..0000000000 --- a/deployment/secure_research_desktop/setup/Convert_VM_To_Image.ps1 +++ /dev/null @@ -1,133 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $true, HelpMessage = "Specify a machine name to turn into an image. Ensure that the build script has completely finished before running this")] - [string]$vmName -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.Compute -ErrorAction Stop -Import-Module Az.Network -ErrorAction Stop -Import-Module Az.Resources -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureCompute -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureKeyVault -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureResources -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Cryptography -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-ShmConfig -shmId $shmId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.srdImage.subscription -ErrorAction Stop - - -# Construct build VM parameters -# ----------------------------- -$buildVmAdminUsername = Resolve-KeyVaultSecret -VaultName $config.srdImage.keyVault.name -SecretName $config.keyVault.secretNames.buildImageAdminUsername -DefaultValue "srdbuildadmin" -AsPlaintext - - -# Setup image resource group if it does not already exist -# ------------------------------------------------------- -$null = Deploy-ResourceGroup -Name $config.srdImage.images.rg -Location $config.srdImage.location - - -# Look for this VM in the appropriate resource group -# -------------------------------------------------- -$vm = Get-AzVM -Name $vmName -ResourceGroupName $config.srdImage.build.rg -ErrorVariable notExists -ErrorAction SilentlyContinue -if ($notExists) { - Add-LogMessage -Level Error "Could not find a machine called '$vmName' in resource group $($config.srdImage.build.rg)" - Add-LogMessage -Level Info "Available machines are:" - foreach ($candidateVM in Get-AzVM -ResourceGroupName $config.srdImage.build.rg) { - Add-LogMessage -Level Info " $($candidateVM.Name)" - } - Add-LogMessage -Level Fatal "Could not find a machine called '$vmName'!" -} - - -# Ensure that the VM is running -# ----------------------------- -Start-VM -Name $vmName -ResourceGroupName $config.srdImage.build.rg -Start-Sleep 60 # Wait to ensure that SSH is able to accept connections - - -# Check the VM build status and ask for user confirmation -# ------------------------------------------------------- -Add-LogMessage -Level Info "Obtaining build status for candidate: $($vm.Name)..." -$null = Invoke-RemoteScript -VMName $vm.Name -ResourceGroupName $config.srdImage.build.rg -Shell "UnixShell" -Script "python3 /opt/monitoring/analyse_build.py" -Add-LogMessage -Level Warning "Please check the output of the build analysis script (above) before continuing. All steps should have completed with a 'SUCCESS' message." 
-$confirmation = $null -while ($confirmation -ne "y") { - if ($confirmation -eq "n") { exit 0 } - $confirmation = Read-Host "Can you confirm that all steps of the '$($vm.Name)' build completed successfully? [y/n]" -} - - -# Deprovision the VM over SSH -# --------------------------- -Add-LogMessage -Level Info "Deprovisioning VM: $($vm.Name)..." -$adminPasswordName = "$($config.keyVault.secretNames.buildImageAdminPassword)-${vmName}" -$publicIp = (Get-AzPublicIpAddress -ResourceGroupName $config.srdImage.build.rg | Where-Object { $_.Id -Like "*$($vm.Name)-NIC-PIP" }).IpAddress -Add-LogMessage -Level Info "... preparing to send deprovisioning command over SSH to: $publicIp..." -Add-LogMessage -Level Info "... the password for this account is in the '${adminPasswordName}' secret in the '$($config.srdImage.keyVault.name)' Key Vault" -ssh -t ${buildVmAdminUsername}@${publicIp} 'sudo /opt/build/deprovision_vm.sh | sudo tee /opt/monitoring/deprovision.log' -if (-not $?) { - Add-LogMessage -Level Fatal "Unable to send deprovisioning command!" -} - - -# Poll VM to see whether it has finished running -# ---------------------------------------------- -Add-LogMessage -Level Info "Waiting for deprovisioning to finish..." -$progress = 0 -$statuses = (Get-AzVM -Name $vm.Name -ResourceGroupName $config.srdImage.build.rg -Status).Statuses.Code -while (-not $statuses.Contains("ProvisioningState/succeeded")) { - $statuses = (Get-AzVM -Name $vm.Name -ResourceGroupName $config.srdImage.build.rg -Status).Statuses.Code - $progress = [math]::min(100, $progress + 1) - Write-Progress -Activity "Deprovisioning status" -Status "$($statuses[0]) $($statuses[1])" -PercentComplete $progress - Start-Sleep 10 -} - - -# Deallocate and generalize. Commands in Powershell are different from the Azure CLI https://docs.microsoft.com/en-us/azure/virtual-machines/windows/tutorial-custom-images -# ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -Add-LogMessage -Level Info "Deallocating and generalising VM: '$($vm.Name)'. This can take up to 20 minutes..." -$null = Stop-AzVM -ResourceGroupName $config.srdImage.build.rg -Name $vm.Name -Force -Add-LogMessage -Level Info "VM has been stopped" -$null = Set-AzVM -ResourceGroupName $config.srdImage.build.rg -Name $vm.Name -Generalized -Add-LogMessage -Level Info "VM has been generalized" - - -# Create an image from the deallocated VM -# --------------------------------------- -$imageName = "Image$($vm.Name -replace 'Candidate', '')" -Add-LogMessage -Level Info "Preparing to create image $imageName..." -$vm = Get-AzVM -Name $vm.Name -ResourceGroupName $config.srdImage.build.rg -$imageConfig = New-AzImageConfig -Location $config.srdImage.location -SourceVirtualMachineId $vm.ID -$image = New-AzImage -Image $imageConfig -ImageName $imageName -ResourceGroupName $config.srdImage.images.rg -# Apply VM tags to the image -$null = New-AzTag -ResourceId $image.Id -Tag @{"Build commit hash" = $vm.Tags["Build commit hash"] } -# If the image has been successfully created then remove build artifacts -if ($image) { - Add-LogMessage -Level Success "Finished creating image $imageName" - Add-LogMessage -Level Info "Removing residual artifacts of the build process from $($config.srdImage.build.rg)..." - Add-LogMessage -Level Info "... 
virtual machine: $vmName" - $null = Remove-VirtualMachine -Name $vmName -ResourceGroupName $config.srdImage.build.rg -Force -ErrorAction SilentlyContinue - Add-LogMessage -Level Info "... hard disk: ${vmName}-OS-DISK" - $null = Remove-AzDisk -DiskName $vmName-OS-DISK -ResourceGroupName $config.srdImage.build.rg -Force -ErrorAction SilentlyContinue - Add-LogMessage -Level Info "... network card: $vmName-NIC" - $null = Remove-AzNetworkInterface -Name $vmName-NIC -ResourceGroupName $config.srdImage.build.rg -Force -ErrorAction SilentlyContinue - Add-LogMessage -Level Info "... public IP address: ${vmName}-NIC-PIP" - $null = Remove-AzPublicIpAddress -Name $vmName-NIC-PIP -ResourceGroupName $config.srdImage.build.rg -Force -ErrorAction SilentlyContinue - Add-LogMessage -Level Info "... KeyVault password: ${adminPasswordName}" - Remove-AndPurgeKeyVaultSecret -VaultName $config.srdImage.keyVault.name -SecretName $adminPasswordName -} else { - Add-LogMessage -Level Fatal "Image '$imageName' could not be created!" -} - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/secure_research_desktop/setup/Provision_Compute_VM.ps1 b/deployment/secure_research_desktop/setup/Provision_Compute_VM.ps1 deleted file mode 100644 index afed2b3e93..0000000000 --- a/deployment/secure_research_desktop/setup/Provision_Compute_VM.ps1 +++ /dev/null @@ -1,162 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $false, HelpMessage = "Source image (one of 'Ubuntu1804', 'Ubuntu2004' or 'Ubuntu2204' [default]")] - [ValidateSet("Ubuntu1804", "Ubuntu2004", "Ubuntu2204")] - [string]$sourceImage = "Ubuntu2004", - [Parameter(Mandatory = $false, HelpMessage = "VM size to use (e.g. 'Standard_E4_v3'. 
Using 'default' will use the value from the configuration file)")] - [ValidateSet("default", "Standard_D4_v3", "Standard_E2_v3", "Standard_E4_v3", "Standard_E8_v3", "Standard_F4s_v2", "Standard_F8s_v2", "Standard_H8")] - [string]$vmSize = "default" -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.Network -ErrorAction Stop -Import-Module Az.Resources -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureCompute -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureKeyVault -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureNetwork -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureResources -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureStorage -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Cryptography -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/DataStructures -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Templates -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-ShmConfig -shmId $shmId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.srdImage.subscription -ErrorAction Stop - - -# Select which VM size to use -# --------------------------- -if ($vmSize -eq "default") { $vmSize = $config.srdImage.build.vm.size } - - -# Select which source URN to base the build on -# -------------------------------------------- -if ($sourceImage -eq "Ubuntu2204") { - Add-LogMessage -Level Warning "Note that '$sourceImage' is not supported by the Azure Log Analytics Agent used to manage automatic updates. Please consider using Ubuntu 20.04." - $baseImageSku = "Ubuntu-22.04" - $shortVersion = "2204" -} elseif ($sourceImage -eq "Ubuntu2004") { - $baseImageSku = "Ubuntu-20.04" - $shortVersion = "2004" -} elseif ($sourceImage -eq "Ubuntu1804") { - Add-LogMessage -Level Warning "Note that '$sourceImage' is out-of-date. Please consider using a newer base Ubuntu version." - $baseImageSku = "Ubuntu-18.04" - $shortVersion = "1804" -} else { - Add-LogMessage -Level Fatal "Did not recognise source image '$sourceImage'!" -} -$buildVmName = "SecureResearchDesktop-Ubuntu${shortVersion}" -$cloudInitTemplate = Get-Content (Join-Path $PSScriptRoot ".." 
"cloud_init" "cloud-init-buildimage-ubuntu-${shortVersion}.mustache.yaml") -Raw - - -# Create resource groups if they do not exist -# ------------------------------------------- -$null = Deploy-ResourceGroup -Name $config.srdImage.build.rg -Location $config.srdImage.location -$null = Deploy-ResourceGroup -Name $config.srdImage.bootdiagnostics.rg -Location $config.srdImage.location -$null = Deploy-ResourceGroup -Name $config.srdImage.network.rg -Location $config.srdImage.location -$null = Deploy-ResourceGroup -Name $config.srdImage.keyVault.rg -Location $config.srdImage.location - - -# Ensure the Key Vault exists and set its access policies -# ------------------------------------------------------- -$null = Deploy-KeyVault -Name $config.srdImage.keyVault.name -ResourceGroupName $config.srdImage.keyVault.rg -Location $config.srdImage.location -Set-KeyVaultPermissions -Name $config.srdImage.keyVault.name -GroupName $config.azureAdminGroupName - - -# Ensure that VNET and subnet exist -# --------------------------------- -$vnet = Deploy-VirtualNetwork -Name $config.srdImage.build.vnet.name -ResourceGroupName $config.srdImage.network.rg -AddressPrefix $config.srdImage.build.vnet.cidr -Location $config.srdImage.location -$subnet = Deploy-Subnet -Name $config.srdImage.build.subnet.name -VirtualNetwork $vnet -AddressPrefix $config.srdImage.build.subnet.cidr - - -# Ensure that build NSG exists with correct rules and attach it to the build subnet -# --------------------------------------------------------------------------------- -Add-LogMessage -Level Info "Ensure that build NSG '$($config.srdImage.build.nsg.name)' exists..." -$buildNsg = Deploy-NetworkSecurityGroup -Name $config.srdImage.build.nsg.name -ResourceGroupName $config.srdImage.network.rg -Location $config.srdImage.location -# Get list of IP addresses which are allowed to connect to the VM candidates -$existingRule = Get-AzNetworkSecurityRuleConfig -NetworkSecurityGroup $buildNsg | Where-Object { $_.Name -eq "AllowAdminApprovedSshInbound" } -$allowedIpAddresses = @($config.srdImage.build.nsg.allowedIpAddresses) -$allowedIpAddresses += $existingRule ? @($existingRule.SourceAddressPrefix) : @() -$config["buildAdminIpAddresses"] = $allowedIpAddresses | Where-Object { $_ } | Sort-Object | Get-Unique -# Update the NSG and ensure it is connected to the correct subnet -$rules = Get-JsonFromMustacheTemplate -TemplatePath (Join-Path $PSScriptRoot ".." "network_rules" $config.srdImage.build.nsg.rules) -Parameters $config -AsHashtable -$null = Set-NetworkSecurityGroupRules -NetworkSecurityGroup $buildNsg -Rules $rules -$subnet = Set-SubnetNetworkSecurityGroup -Subnet $subnet -NetworkSecurityGroup $buildNsg - - -# Load the cloud-init template then add resources and expand mustache placeholders -# -------------------------------------------------------------------------------- -$config["dbeaver"] = @{ - drivers = $(Get-Content -Raw -Path (Join-Path $PSScriptRoot ".." "packages" "dbeaver-driver-versions.json" -Resolve) | ConvertFrom-Json -AsHashtable) -} -$cloudInitTemplate = Expand-CloudInitResources -Template $cloudInitTemplate -ResourcePath (Join-Path $PSScriptRoot ".." "cloud_init" "resources") -$cloudInitTemplate = Expand-CloudInitResources -Template $cloudInitTemplate -ResourcePath (Join-Path $PSScriptRoot ".." 
"packages") -$cloudInitTemplate = Expand-MustacheTemplate -Template $cloudInitTemplate -Parameters $config - - -# Construct build VM parameters -# ----------------------------- -$buildVmAdminUsername = Resolve-KeyVaultSecret -VaultName $config.srdImage.keyVault.name -SecretName $config.keyVault.secretNames.buildImageAdminUsername -DefaultValue "srdbuildadmin" -AsPlaintext -$buildVmBootDiagnosticsAccount = Deploy-StorageAccount -Name $config.srdImage.bootdiagnostics.accountName -ResourceGroupName $config.srdImage.bootdiagnostics.rg -Location $config.srdImage.location -$buildVmName = "Candidate${buildVmName}-$(Get-Date -Format "yyyyMMddHHmm")" -$buildVmNic = Deploy-NetworkInterface -Name "$buildVmName-NIC" -ResourceGroupName $config.srdImage.build.rg -Subnet $subnet -PublicIpAddressAllocation "Static" -Location $config.srdImage.location -$adminPasswordName = "$($config.keyVault.secretNames.buildImageAdminPassword)-${buildVmName}" - - -# Check cloud-init size -# --------------------- -$CloudInitEncodedLength = ($cloudInitTemplate | ConvertTo-Base64).Length -if ($CloudInitEncodedLength / 87380 -gt 0.9) { - Add-LogMessage -Level Warning "The current cloud-init size ($CloudInitEncodedLength Base64 characters) is more than 90% of the limit of 87380 characters!" -} - - -# Deploy the VM -# ------------- -Add-LogMessage -Level Info "Provisioning a new VM image in $($config.srdImage.build.rg) '$($config.srdImage.subscription)'..." -Add-LogMessage -Level Info " VM name: $buildVmName" -Add-LogMessage -Level Info " VM size: $vmSize" -Add-LogMessage -Level Info " Base image: Ubuntu $baseImageSku" -$params = @{ - Name = $buildVmName - Size = $vmSize - AdminPassword = (Resolve-KeyVaultSecret -VaultName $config.srdImage.keyVault.name -SecretName $adminPasswordName -DefaultLength 20) - AdminUsername = $buildVmAdminUsername - BootDiagnosticsAccount = $buildVmBootDiagnosticsAccount - CloudInitYaml = $cloudInitTemplate - Location = $config.srdImage.location - NicId = $buildVmNic.Id - OsDiskSizeGb = $config.srdImage.build.vm.diskSizeGb - OsDiskType = $config.srdImage.build.vm.diskType - ResourceGroupName = $config.srdImage.build.rg - ImageSku = $baseImageSku -} -$vm = Deploy-LinuxVirtualMachine @params -NoWait - - -# Tag the VM with the git commit hash -# ----------------------------------- -$null = New-AzTag -ResourceId $vm.Id -Tag @{"Build commit hash" = $(git rev-parse --verify HEAD) } - - -# Log connection details for monitoring this build -# ------------------------------------------------ -$publicIp = (Get-AzPublicIpAddress -ResourceGroupName $config.srdImage.build.rg | Where-Object { $_.Id -Like "*${buildVmName}-NIC-PIP" }).IpAddress -Add-LogMessage -Level Info "This process will take several hours to complete." 
-Add-LogMessage -Level Info " You can monitor installation progress using: ssh $buildVmAdminUsername@$publicIp" -Add-LogMessage -Level Info " The password for this account can be found in the '${adminPasswordName}' secret in the Azure Key Vault at:" -Add-LogMessage -Level Info " $($config.srdImage.subscription) > $($config.srdImage.keyVault.rg) > $($config.srdImage.keyVault.name)" -Add-LogMessage -Level Info " Once logged in, check the installation progress with: /opt/monitoring/analyse_build.py" -Add-LogMessage -Level Info " The full log file can be viewed with: tail -f -n+1 /var/log/cloud-init-output.log" - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/secure_research_desktop/setup/Register_Image_In_Gallery.ps1 b/deployment/secure_research_desktop/setup/Register_Image_In_Gallery.ps1 deleted file mode 100644 index 22d609fc63..0000000000 --- a/deployment/secure_research_desktop/setup/Register_Image_In_Gallery.ps1 +++ /dev/null @@ -1,135 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $true, HelpMessage = "Specify an existing VM image to add to the gallery.")] - [string]$imageName, - [Parameter(Mandatory = $false, HelpMessage = "Override the automatically determined version number. Use with caution.")] - [string]$imageVersion = $null -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.Compute -ErrorAction Stop -Import-Module Az.Resources -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureResources -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Cryptography -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-ShmConfig -shmId $shmId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.srdImage.subscription -ErrorAction Stop - - -# Useful constants -# ---------------- -$supportedImages = @("SecureResearchDesktop-Ubuntu") - - -# Ensure that gallery resource group exists -# ----------------------------------------- -$null = Deploy-ResourceGroup -Name $config.srdImage.gallery.rg -Location $config.srdImage.location - - -# Ensure that image gallery exists -# -------------------------------- -$null = Get-AzGallery -Name $config.srdImage.gallery.name -ResourceGroupName $config.srdImage.gallery.rg -ErrorVariable notExists -ErrorAction SilentlyContinue -if ($notExists) { - Add-LogMessage -Level Info "Creating image gallery $($config.srdImage.gallery.name)..." - $null = New-AzGallery -GalleryName $config.srdImage.gallery.name -ResourceGroupName $config.srdImage.gallery.rg -Location $config.srdImage.location -} - - -# Set up list of image definitions we want to support -# --------------------------------------------------- -foreach ($supportedImage in $supportedImages) { - $null = Get-AzGalleryImageDefinition -GalleryName $config.srdImage.gallery.name -ResourceGroupName $config.srdImage.gallery.rg -Name $supportedImage -ErrorVariable notExists -ErrorAction SilentlyContinue - if ($notExists) { - Add-LogMessage -Level Info "Creating image definition $supportedImage..." 
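# Note on the split below: each supported image name doubles as "<Offer>-<Sku>",
# e.g. "SecureResearchDesktop-Ubuntu" is split into Offer = "SecureResearchDesktop"
# and Sku = "Ubuntu" when the gallery image definition is created.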
- $offer = ($supportedImage -Split "-")[0] - $sku = ($supportedImage -Split "-")[1] - $null = New-AzGalleryImageDefinition -GalleryName $config.srdImage.gallery.name -ResourceGroupName $config.srdImage.gallery.rg -Name $supportedImage -Publisher Turing -Offer $offer -Sku $sku -Location $config.srdImage.location -OsState generalized -OsType Linux - } -} - - -# Ensure that image exists in the image resource group -# ---------------------------------------------------- -$image = Get-AzResource -ResourceType Microsoft.Compute/images -ResourceGroupName $config.srdImage.images.rg -Name $imageName -if (-not $image) { - Add-LogMessage -Level Error "Could not find an image called '$imageName' in resource group $($config.srdImage.images.rg)" - Add-LogMessage -Level Info "Available images are:" - foreach ($image in Get-AzResource -ResourceType Microsoft.Compute/images -ResourceGroupName $config.srdImage.images.rg) { - Add-LogMessage -Level Info " $($image.Name)" - } - Add-LogMessage -Level Fatal "Could not find an image called '$imageName'!" -} - - -# Check which image definition to use -# ----------------------------------- -Add-LogMessage -Level Info "Checking whether $($image.Name) is a supported image..." -$imageDefinition = $supportedImages | Where-Object { $image.Name -Like "*$_*" } | Select-Object -First 1 -$majorVersion = ($image.Name -split "-")[1].Replace("Ubuntu", "").Substring(0, 2) -$minorVersion = ($image.Name -split "-")[1].Replace("Ubuntu", "").Substring(2, 2) -if (-not ($imageDefinition -and $majorVersion -and $minorVersion)) { - Add-LogMessage -Level Fatal "Could not identify $($image.Name) as a supported image" -} - - -# Determine the appropriate image version -# --------------------------------------- -Add-LogMessage -Level Info "[ ] Determining appropriate image version..." -if (-not $imageVersion) { - $baseImageVersion = "${majorVersion}.${minorVersion}.$(Get-Date -Format "yyyyMMdd")" - $mostRecentImageVersion = Get-AzGalleryImageVersion -ResourceGroupName $config.srdImage.gallery.rg -GalleryName $config.srdImage.gallery.name -GalleryImageDefinitionName $imageDefinition | Where-Object { $_.Name -Like "${baseImageVersion}*" } | ForEach-Object { $_.Name } | Sort-Object -Descending | Select-Object -First 1 - if ($mostRecentImageVersion) { - $imageVersion = "${majorVersion}.${minorVersion}.$([int]($mostRecentImageVersion.Split('.')[2]) + 1)" - } else { - $imageVersion = "${baseImageVersion}00" - } -} -Add-LogMessage -Level Success "Image version '$imageVersion' will be used" - - -# Create the image as a new version of the appropriate existing registered version -# -------------------------------------------------------------------------------- -$targetRegions = @( - @{Name = "Central US"; ReplicaCount = 1 }, - @{Name = "UK South"; ReplicaCount = 1 }, - @{Name = "UK West"; ReplicaCount = 1 }, - @{Name = "West Europe"; ReplicaCount = 1 } -) -Add-LogMessage -Level Info "[ ] Preparing to replicate $($image.Name) across $($targetRegions.Length) regions as version $imageVersion of $imageDefinition..." 
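To make the version scheme computed above concrete: gallery image versions take the form <major>.<minor>.<yyyyMMdd><NN>, where NN is a two-digit counter that increments for repeat registrations of the same definition on the same day (the dates below are illustrative):

# First registration of an Ubuntu 20.04 image on 2024-03-15:
#   $baseImageVersion = "20.04.20240315"  =>  $imageVersion = "20.04.2024031500"
# A second registration of the same definition later that day would yield "20.04.2024031501"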
-Add-LogMessage -Level Warning "Please note, this may take around one hour to complete" -$null = New-AzGalleryImageVersion -GalleryImageDefinitionName $imageDefinition ` - -GalleryImageVersionName "$imageVersion" ` - -GalleryName $config.srdImage.gallery.name ` - -Location $config.srdImage.location ` - -ResourceGroupName $config.srdImage.gallery.rg ` - -Source $image.Id.ToString() ` - -TargetRegion $targetRegions ` - -AsJob -$job = Get-Job -Command New-AzGalleryImageVersion | Sort-Object { $_.PSBeginTime } -Descending | Select-Object -First 1 -while ($job.State -ne "Completed") { - $progress = [math]::min(100, $progress + 1) - Write-Progress -Activity "Replication status" -Status $job.State -PercentComplete $progress - Start-Sleep 60 -} -$resource = Get-AzResource -ResourceGroupName $config.srdImage.gallery.rg | Where-Object { $_.Name -match "${imageDefinition}/${imageVersion}" } -$null = New-AzTag -ResourceId $resource.Id -Tag @{"Build commit hash" = $image.Tags["Build commit hash"] } - - -# List replication results -# ------------------------ -Add-LogMessage -Level Info "Result of replication..." -foreach ($imageStatus in Get-AzGalleryImageVersion -ResourceGroupName $config.srdImage.gallery.rg -GalleryName $config.srdImage.gallery.name -GalleryImageDefinitionName $imageDefinition -Name "$imageVersion") { - Add-LogMessage -Level Info ($imageStatus | Out-String) -} - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/secure_research_environment/arm_templates/sre-mssql2019-server-template.json b/deployment/secure_research_environment/arm_templates/sre-mssql2019-server-template.json deleted file mode 100644 index f515fc9966..0000000000 --- a/deployment/secure_research_environment/arm_templates/sre-mssql2019-server-template.json +++ /dev/null @@ -1,295 +0,0 @@ -{ - "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", "parameters": { - "administratorPassword": { - "type": "securestring", - "metadata": { - "description": "Password for domain administrator" - } - }, - "administratorUsername": { - "type": "string", - "metadata": { - "description": "Username for domain administrator" - } - }, - "bootDiagnosticsAccountName": { - "type": "string", - "metadata": { - "description": "Name of storage account used for boot diagnostics" - } - }, - "privateIpAddress": { - "type": "string", - "defaultValue": "10.x.x.x", - "metadata": { - "description": "IP Address for VM" - } - }, - "sqlDbAdministratorPassword": { - "type": "string", - "metadata": { - "description": "Password for SQL AuthUpdate User" - } - }, - "sqlDbAdministratorUsername": { - "type": "string", - "metadata": { - "description": "Username for SQL AuthUpdate User" - } - }, - "sqlServerConnectionPort": { - "type": "string", - "metadata": { - "description": "Which port to use for connections" - } - }, - "sqlServerName": { - "type": "string", - "metadata": { - "description": "Name for SQL Server VM" - } - }, - "sqlServerEdition": { - "type": "string", - "defaultValue": "sqldev-gen2", - "allowedValues": [ - "sqldev-gen2", - "enterprise" - ], - "metadata": { - "description": "SQL Server Edition to be installed" - } - }, - "virtualNetworkName": { - "type": "string", - "metadata": { - "description": "Name of virtual network to provision these VMs" - } - }, - "virtualNetworkResourceGroupName": { - "type": "string", - "metadata": { - "description": "Name of 
resource group that is associated with the virtual network above" - } - }, - "virtualNetworkSubnetName": { - "type": "string", - "metadata": { - "description": "Name of subnet where you want to provision this VM" - } - }, - "vmDataDiskSizeGb": { - "type": "int", - "metadata": { - "description": "Size of NPS OS disk in GB" - } - }, - "vmDataDiskType": { - "type": "string", - "metadata": { - "description": "Type of data disk" - } - }, - "vmOsDiskSizeGb": { - "type": "string", - "metadata": { - "description": "Size of OS disk in GB" - } - }, - "vmOsDiskType": { - "type": "string", - "metadata": { - "description": "Type of OS disk" - } - }, - "vmSize": { - "type": "string", - "defaultValue": "Standard_GS1", - "metadata": { - "description": "Size of VM" - } - } - }, - "variables": { - "nic": "[concat(parameters('sqlServerName'), '-', 'NIC')]", - "datadisk": "[concat(parameters('sqlServerName'), '-DATA-DISK')]", - "sqlDisksLUNs": ["0"], - "vnetID": "[resourceId(parameters('virtualNetworkResourceGroupName'), 'Microsoft.Network/virtualNetworks', parameters('virtualNetworkName'))]", - "subnetId": "[concat(variables('vnetID'),'/subnets/', parameters('virtualNetworkSubnetName'))]" - - }, - "resources": [ - { - "type": "Microsoft.Compute/virtualMachines", - "name": "[parameters('sqlServerName')]", - "apiVersion": "2021-11-01", - "location": "[resourceGroup().location]", - "scale": null, - "properties": { - "hardwareProfile": { - "vmSize": "[parameters('vmSize')]" - }, - "storageProfile": { - "imageReference": { - "publisher": "MicrosoftSQLServer", - "offer": "SQL2022-WS2022", - "sku": "[parameters('sqlServerEdition')]", - "version": "latest" - }, - "osDisk": { - "osType": "Windows", - "name": "[concat(parameters('sqlServerName'),'-OS-DISK')]", - "createOption": "FromImage", - "caching": "ReadWrite", - "writeAcceleratorEnabled": false, - "managedDisk": { - "storageAccountType": "[parameters('vmOsDiskType')]" - }, - "diskSizeGB": "[parameters('vmOsDiskSizeGb')]" - }, - "dataDisks": [ - { - "lun": 0, - "name": "[variables('datadisk')]", - "createOption": "Empty", - "caching": "None", - "writeAcceleratorEnabled": false, - "managedDisk": { - "storageAccountType": "[parameters('vmDataDiskType')]" - }, - "diskSizeGB": "[parameters('vmDataDiskSizeGb')]" - } - ] - }, - "osProfile": { - "computerName": "[parameters('sqlServerName')]", - "adminUsername": "[parameters('administratorUsername')]", - "adminPassword": "[parameters('administratorPassword')]", - "windowsConfiguration": { - "provisionVMAgent": true, - "enableAutomaticUpdates": true - }, - "secrets": [] - }, - "networkProfile": { - "networkInterfaces": [ - { - "id": "[resourceId('Microsoft.Network/networkInterfaces', variables('nic'))]", - "properties": { - "primary": true - } - } - ] - }, - "diagnosticsProfile": { - "bootDiagnostics": { - "enabled": true, - "storageUri": "[concat('https://', parameters('bootDiagnosticsAccountName'), '.blob.core.windows.net/')]" - } - } - }, - "dependsOn": [ - "[resourceId('Microsoft.Network/networkInterfaces', variables('nic'))]" - ] - }, - { - "type": "Microsoft.Network/networkInterfaces", - "name": "[variables('nic')]", - "apiVersion": "2020-11-01", - "location": "[resourceGroup().location]", - "scale": null, - "properties": { - "ipConfigurations": [ - { - "name": "ipconfig1", - "properties": { - "privateIPAddress": "[parameters('privateIpAddress')]", - "privateIPAllocationMethod": "Static", - "subnet": { - "id": "[variables('subnetId')]" - }, - "primary": true, - "privateIPAddressVersion": "IPv4" - } - } - ], - 
"dnsSettings": { - "dnsServers": [], - "appliedDnsServers": [] - }, - "enableAcceleratedNetworking": false, - "enableIPForwarding": false, - "primary": true, - "tapConfigurations": [] - }, - "dependsOn": [] - }, - { - "type": "Microsoft.Compute/virtualMachines/extensions", - "name": "[concat(parameters('sqlServerName'), '/', 'bginfo')]", - "apiVersion": "2021-11-01", - "location": "[resourceGroup().location]", - "scale": null, - "properties": { - "autoUpgradeMinorVersion": true, - "publisher": "Microsoft.Compute", - "type": "bginfo", - "typeHandlerVersion": "2.1" - }, - "dependsOn": [ - "[resourceId('Microsoft.Compute/virtualMachines', parameters('sqlServerName'))]" - ] - }, - { - "name": "[parameters('sqlServerName')]", - "type": "Microsoft.SqlVirtualMachine/SqlVirtualMachines", - "apiVersion": "2021-11-01-preview", - "location": "[resourceGroup().location]", - "properties": { - "sqlServerLicenseType": "PAYG", - "sqlManagement": "Full", - "autoPatchingSettings": { - "dayOfWeek": "Sunday", - "enable": true, - "maintenanceWindowDuration": 60, - "maintenanceWindowStartingHour": 2 - }, - "keyVaultCredentialSettings": { - "enable": false, - "credentialName": "" - }, - "serverConfigurationsManagementSettings": { - "additionalFeaturesServerConfigurations": { - "isRServicesEnabled": false - }, - "sqlConnectivityUpdateSettings": { - "connectivityType": "Private", - "port": "[parameters('sqlServerConnectionPort')]", - "sqlAuthUpdateUserName": "[parameters('sqlDbAdministratorUsername')]", - "sqlAuthUpdatePassword": "[parameters('sqlDbAdministratorPassword')]" - } - }, - "storageConfigurationSettings": { - "diskConfigurationType": "NEW", - "sqlDataSettings": { - "defaultFilePath": "F:\\data", - "luns": "[variables('sqlDisksLUNs')]" - }, - "sqlLogSettings": { - "defaultFilePath": "F:\\log", - "luns": "[variables('sqlDisksLUNs')]" - }, - "sqlTempDbSettings": { - "defaultFilePath": "D:\\tempDb" - }, - "storageWorkloadType": "GENERAL" - }, - "virtualMachineResourceId": "[resourceId('Microsoft.Compute/virtualMachines', parameters('sqlServerName'))]" - }, - "dependsOn": [ - "[resourceId('Microsoft.Compute/virtualMachines', parameters('sqlServerName'))]" - ] - } - ] -} \ No newline at end of file diff --git a/deployment/secure_research_environment/cloud_init/cloud-init-codimd.mustache.yaml b/deployment/secure_research_environment/cloud_init/cloud-init-codimd.mustache.yaml deleted file mode 100644 index d358f75e25..0000000000 --- a/deployment/secure_research_environment/cloud_init/cloud-init-codimd.mustache.yaml +++ /dev/null @@ -1,248 +0,0 @@ -#cloud-config - -# Create files -write_files: - - path: "/etc/apt/apt.conf.d/00proxy" - permissions: "0444" - content: | - Acquire::http::Proxy "http://{{shm.monitoring.updateServers.linux.ip}}:8000"; - - - path: "/etc/audit/rules.d/audit.rules" - permissions: "0400" - content: | - {{audit.rules}} - - - path: "/etc/clamav/clamd.conf" - permissions: "0644" - append: true - content: | - {{clamd.conf}} - - - path: "/etc/cron.d/clamav-freshclam" - permissions: "0644" - content: | - # Run every day at a fixed time - {{shm.monitoring.updateServers.schedule.daily_definition_updates.minute}} {{shm.monitoring.updateServers.schedule.daily_definition_updates.hour}} * * * freshclam - - - path: "/etc/systemd/system/clamav-clamonacc.service" - permissions: "0644" - content: | - {{clamav-clamonacc.service}} - - - path: "/etc/systemd/system/clamav-clamdscan.service" - permissions: "0644" - content: | - {{clamav-clamdscan.service}} - - - path: 
"/etc/systemd/system/clamav-clamdscan.timer" - permissions: "0644" - content: | - {{clamav-clamdscan.timer}} - - - path: "/opt/codimd/docker-compose.yml" - permissions: "0400" - content: | - version: '3' - services: - codimd: - depends_on: - - database - image: hackmdio/hackmd:{{sre.webapps.codimd.codimd.dockerVersion}} - environment: - - CMD_ALLOW_ANONYMOUS=false - - CMD_ALLOW_FREEURL=true - - CMD_DB_URL=postgres://codimd:{{codimd.postgresPassword}}@database:5432/codimd - - CMD_EMAIL=false - - CMD_IMAGE_UPLOAD_TYPE=filesystem - - CMD_LDAP_BINDCREDENTIALS={{codimd.ldapSearchUserPassword}} - - CMD_LDAP_BINDDN={{codimd.ldapSearchUserDn}} - - CMD_LDAP_PROVIDERNAME={{shm.domain.netbiosName}} - - CMD_LDAP_SEARCHBASE={{shm.domain.ous.researchUsers.path}} - - CMD_LDAP_SEARCHFILTER={{{codimd.ldapUserFilter}}} - - CMD_LDAP_URL=ldap://{{shm.dc.fqdn}} - - CMD_LDAP_USERIDFIELD=sAMAccountName - - CMD_USECDN=false - ports: - # Map port 80 (external) to port 3000 (internal) - - 80:3000 - networks: - dockernet: - restart: always - volumes: - - /data/codimd:/codimd/public/uploads - database: - image: postgres:{{sre.webapps.codimd.postgres.dockerVersion}} - environment: - - POSTGRES_USER=codimd - - POSTGRES_PASSWORD={{codimd.postgresPassword}} - - POSTGRES_DB=codimd - networks: - dockernet: - restart: always - volumes: - - /data/postgresql:/var/lib/postgresql/data - networks: - dockernet: - - - path: "/opt/configuration/set_dns.sh" - permissions: "0500" - content: | - {{set_dns.mustache.sh}} - - - path: "/opt/configuration/docker_pat.txt" - permissions: "0400" - content: {{shm.dockerPassword}} - -# Set locale and timezone -locale: en_GB.UTF-8 -timezone: {{sre.time.timezone.linux}} - -# Set the NTP server -# By default we use Google's NTP servers which are incompatible with other servers due to leap-second smearing -ntp: - enabled: true - pools: - {{#shm.time.ntp.serverAddresses}} - - {{.}} - {{/shm.time.ntp.serverAddresses}} - -# Configure apt repositories -apt: - preserve_sources_list: true - -# Install necessary apt packages -packages: - - apt-transport-https - - auditd - - ca-certificates - - clamav - - clamav-base - - clamav-daemon - - clamav-freshclam - - clamav-unofficial-sigs - - curl - - docker.io - - docker-compose-v2 - - ldap-utils - - software-properties-common -package_update: true -package_upgrade: true - -# We know that exactly one data disk will be attached to this VM and it will be attached as lun0 -disk_setup: - /dev/disk/azure/scsi1/lun0: - table_type: gpt - layout: true - overwrite: true -fs_setup: - - device: /dev/disk/azure/scsi1/lun0 - partition: auto - filesystem: ext4 -mounts: - - [/dev/disk/azure/scsi1/lun0-part1, /data, ext4, "defaults,nofail"] - -# Set hostname -fqdn: {{sre.webapps.codimd.fqdn}} -hostname: {{sre.webapps.codimd.fqdn}} - -# Add the SRE admin (default) and codimddaemon users -users: - - default - - name: codimddaemon - lock_passwd: true # Lock the password to disable password login - sudo: false # This user will not have sudo privileges - -# Run other commands -runcmd: - # Suppress apt prompts and warning messages - - DEBIAN_FRONTEND=noninteractive - - export DEBIAN_FRONTEND - - # Clean up installation - - echo ">=== Cleaning up apt-get packages... ===<" - - apt update - - apt-get -y autoremove - - apt-get clean - - apt --fix-broken install - - # Ensure that auditd is running and enabled at startup - - echo ">=== Enabling auditd services... 
===<" - - systemctl start auditd - - systemctl enable auditd - - sleep 20 - - systemctl status auditd - - # Configure ClamAV - - echo ">=== Configure ClamAV... ===<" - # Allow unlimited recursion when scanning - - sed -i 's/^MaxDirectoryRecursion .*/MaxDirectoryRecursion 0/' /etc/clamav/clamd.conf - # Enable ClamAV daemon - - systemctl enable clamav-daemon - # Enable ClamAV scan on access - - systemctl enable clamav-clamonacc - # Enable ClamAV daily scan - - systemctl enable clamav-clamdscan.timer - # Disable ClamAV database update on boot - - systemctl stop clamav-freshclam - - systemctl disable clamav-freshclam - - # Check server settings - - echo ">=== DNS ===<" - - /opt/configuration/set_dns.sh - - echo ">=== Hostname ===<" - - hostnamectl - - echo ">=== Date/time ===<" - - timedatectl - - # Configuring attached disks - - echo ">=== Configuring attached disks... ===<" - - mkdir -p /data/postgresql - - mkdir -p /data/codimd - - chown -R 1500:1500 /data/codimd # allow the 'codimd' user inside the docker container to access this volume - - ls -alh /data/ - - # Ensure that Docker is running and enabled at startup - - echo ">=== Configuring Docker... ===<" - - systemctl enable docker - - systemctl start docker - - sleep 1m - - systemctl status docker - - docker --version - - docker compose version - - # Set up the codimddaemon user - - echo ">=== Configuring codimddaemon user... ===<" - - groupadd docker 2> /dev/null - - usermod -aG docker codimddaemon - - newgrp docker - - chown -R codimddaemon:codimddaemon /opt/codimd - - ls -alh /opt/codimd - - # Deploy CodiMD using Docker - - echo ">=== Deploying CodiMD with Docker... ===<" - - chown codimddaemon:codimddaemon /opt/configuration/docker_pat.txt # Ensure that the file is owned by the codimddaemon user - - su codimddaemon -c "cat /opt/configuration/docker_pat.txt | docker login --username '{{shm.dockerAccount}}' --password-stdin - && docker compose -f /opt/codimd/docker-compose.yml up -d" - - # Wait for deployment to finish - - | - while true; do - CODIMD_STATUS=$(docker compose -f /opt/codimd/docker-compose.yml logs codimd | grep "HTTP Server listening at 0.0.0.0:3000") - POSTGRES_STATUS=$(docker compose -f /opt/codimd/docker-compose.yml logs database | grep "ready to accept connections") - if [ "$CODIMD_STATUS" != "" ] && [ "$POSTGRES_STATUS" != "" ]; then - break - fi - sleep 5 - done - - docker compose -f /opt/codimd/docker-compose.yml logs - - # Print a final message - - echo ">=== Deploying CodiMD with Docker is complete ===<" - - docker compose -f /opt/codimd/docker-compose.yml ps - -# Shutdown so that we can tell when the job has finished by polling the VM state -power_state: - mode: poweroff - message: "Shutting down as a signal that setup is finished" - timeout: 30 - condition: true diff --git a/deployment/secure_research_environment/cloud_init/cloud-init-gitlab.mustache.yaml b/deployment/secure_research_environment/cloud_init/cloud-init-gitlab.mustache.yaml deleted file mode 100644 index 845fa9ad7a..0000000000 --- a/deployment/secure_research_environment/cloud_init/cloud-init-gitlab.mustache.yaml +++ /dev/null @@ -1,210 +0,0 @@ -#cloud-config - -# Create files -write_files: - - path: "/etc/apt/apt.conf.d/00proxy" - permissions: "0444" - content: | - Acquire::http::Proxy "http://{{shm.monitoring.updateServers.linux.ip}}:8000"; - Acquire::https::Proxy "http://{{shm.monitoring.updateServers.linux.ip}}:8000"; - - - - path: "/etc/audit/rules.d/audit.rules" - permissions: "0400" - content: | - {{audit.rules}} - - - path: 
"/etc/clamav/clamd.conf" - permissions: "0644" - append: true - content: | - {{clamd.conf}} - - - path: "/etc/cron.d/clamav-freshclam" - permissions: "0644" - content: | - # Run every day at a fixed time - {{shm.monitoring.updateServers.schedule.daily_definition_updates.minute}} {{shm.monitoring.updateServers.schedule.daily_definition_updates.hour}} * * * freshclam - - - path: "/etc/gitlab/gitlab.rb" - permissions: "0600" - content: | - external_url 'http://{{sre.webapps.gitlab.ip}}' - gitlab_rails['ldap_enabled'] = true - gitlab_rails['ldap_servers'] = YAML.load <<-'EOS' - main: # 'main' is the GitLab 'provider ID' of this LDAP server - label: 'LDAP' - host: '{{shm.dc.hostname}}.{{shm.domain.fqdn}}' - port: 389 - uid: 'sAMAccountName' - method: 'plain' # "tls" or "ssl" or "plain" - bind_dn: '{{gitlab.ldapSearchUserDn}}' - password: '{{gitlab.ldapSearchUserPassword}}' - active_directory: true - allow_username_or_email_login: true - block_auto_created_users: false - base: '{{shm.domain.ous.researchUsers.path}}' - user_filter: '{{{gitlab.ldapUserFilter}}}' - attributes: - username: ['uid', 'userid', 'sAMAccountName'] - email: ['mail', 'email', 'userPrincipalName'] - name: 'cn' - first_name: 'givenName' - last_name: 'sn' - EOS - git_data_dirs({ "default" => { "path" => "/data" } }) - - - path: "/etc/systemd/system/clamav-clamonacc.service" - permissions: "0644" - content: | - {{clamav-clamonacc.service}} - - - path: "/etc/systemd/system/clamav-clamdscan.service" - permissions: "0644" - content: | - {{clamav-clamdscan.service}} - - - path: "/etc/systemd/system/clamav-clamdscan.timer" - permissions: "0644" - content: | - {{clamav-clamdscan.timer}} - - - path: "/opt/configuration/set_dns.sh" - permissions: "0500" - content: | - {{set_dns.mustache.sh}} - -# Set locale and timezone -locale: en_GB.UTF-8 -timezone: {{sre.time.timezone.linux}} - -# Set the NTP server -# By default we use Google's NTP servers which are incompatible with other servers due to leap-second smearing -ntp: - enabled: true - pools: - {{#shm.time.ntp.serverAddresses}} - - {{.}} - {{/shm.time.ntp.serverAddresses}} - -# Configure apt repositories -apt: - preserve_sources_list: true - sources: - gitlab.list: - source: "deb https://packages.gitlab.com/gitlab/gitlab-ce/ubuntu focal main" - keyid: F6403F6544A38863DAA0B6E03F01618A51312F3F # GitLab B.V. 
(package repository signing key) - -# Install necessary apt packages -packages: - - apt-transport-https - - auditd - - ca-certificates - - clamav - - clamav-base - - clamav-daemon - - clamav-freshclam - - clamav-unofficial-sigs - - curl - - gitlab-ce - - gnupg - - ldap-utils - - openssh-server - - postfix -package_update: true -package_upgrade: true - -# We know that exactly one data disk will be attached to this VM and it will be attached as lun0 -disk_setup: - /dev/disk/azure/scsi1/lun0: - table_type: gpt - layout: true - overwrite: true -fs_setup: - - device: /dev/disk/azure/scsi1/lun0 - partition: auto - filesystem: ext4 -mounts: - - [/dev/disk/azure/scsi1/lun0-part1, /data, ext4, "defaults,nofail"] - -# Set hostname -fqdn: {{sre.webapps.gitlab.fqdn}} -hostname: {{sre.webapps.gitlab.fqdn}} - -# Add the SRE admin (default) user -users: - - default - -# Run other commands -runcmd: - # Suppress apt prompts and warning messages - - DEBIAN_FRONTEND=noninteractive - - export DEBIAN_FRONTEND - - # As packages.gitlab.com forwards you to a CDN we would need to allow access to *.cloudfront.net - # As we do not want this, we remove this repository before continuing deployment - - rm /etc/apt/sources.list.d/gitlab.list - - # Clean up installation - - echo ">=== Cleaning up apt-get packages... ===<" - - apt update - - apt-get -y autoremove - - apt-get clean - - apt --fix-broken install - - # Ensure that auditd is running and enabled at startup - - echo ">=== Enabling auditd services... ===<" - - systemctl start auditd - - systemctl enable auditd - - sleep 20 - - systemctl status auditd - - # Configure ClamAV - - echo ">=== Configure ClamAV... ===<" - # Allow unlimited recursion when scanning - - sed -i 's/^MaxDirectoryRecursion .*/MaxDirectoryRecursion 0/' /etc/clamav/clamd.conf - # Enable ClamAV daemon - - systemctl enable clamav-daemon - # Enable ClamAV scan on access - - systemctl enable clamav-clamonacc - # Enable ClamAV daily scan - - systemctl enable clamav-clamdscan.timer - # Disable ClamAV database update on boot - - systemctl stop clamav-freshclam - - systemctl disable clamav-freshclam - - # Check server settings - - echo ">=== DNS ===<" - - /opt/configuration/set_dns.sh - - echo ">=== Hostname ===<" - - hostnamectl - - echo ">=== Date/time ===<" - - timedatectl - - # Enable custom GitLab settings and run an initial configuration - - echo ">=== Configuring GitLab... ===<" - - gitlab-ctl reconfigure - - # Set root password and don't prompt for it to be reset when web app first loaded - - | - echo "user = User.find_by(username: 'root');user.password=user.password_confirmation='{{gitlab.rootPassword}}';user.password_automatically_set=false;user.save!;exit;" | gitlab-rails console -e production - - # Turn off user account creation - - | - gitlab-rails runner "ApplicationSetting.last.update_attributes(signup_enabled: false)" - - # Restrict login to SHM domain (must be done AFTER GitLab update) - - | - gitlab-rails runner "ApplicationSetting.last.update_attributes(domain_allowlist: ['{{shm.domain.fqdn}}'])" - - # Reload GitLab configuration and restart GitLab - - echo ">=== Restarting GitLab with updated configuration... 
===<" - - gitlab-ctl reconfigure - - gitlab-ctl restart - -# Shutdown so that we can tell when the job has finished by polling the VM state -power_state: - mode: poweroff - message: "Shutting down as a signal that setup is finished" - timeout: 30 - condition: true diff --git a/deployment/secure_research_environment/cloud_init/cloud-init-guacamole.mustache.yaml b/deployment/secure_research_environment/cloud_init/cloud-init-guacamole.mustache.yaml deleted file mode 100644 index 3461d7cde6..0000000000 --- a/deployment/secure_research_environment/cloud_init/cloud-init-guacamole.mustache.yaml +++ /dev/null @@ -1,225 +0,0 @@ -#cloud-config - -write_files: - - path: "/etc/apt/apt.conf.d/00proxy" - permissions: "0444" - content: | - Acquire::http::Proxy "http://{{shm.monitoring.updateServers.linux.ip}}:8000"; - - - path: "/etc/audit/rules.d/audit.rules" - permissions: "0400" - content: | - {{audit.rules}} - - - path: "/etc/clamav/clamd.conf" - permissions: "0644" - append: true - content: | - {{clamd.conf}} - - - path: "/etc/cron.d/clamav-freshclam" - permissions: "0644" - content: | - # Run every day at a fixed time - {{shm.monitoring.updateServers.schedule.daily_definition_updates.minute}} {{shm.monitoring.updateServers.schedule.daily_definition_updates.hour}} * * * freshclam - - - path: "/etc/cron.d/ldap-db-sync" - permissions: "0644" - content: | - # Run user synchronisation every 10 minutes - */10 * * * * root /opt/pg-ldap-sync/synchronise_database.sh - - - path: "/etc/systemd/system/clamav-clamonacc.service" - permissions: "0644" - content: | - {{clamav-clamonacc.service}} - - - path: "/etc/systemd/system/clamav-clamdscan.service" - permissions: "0644" - content: | - {{clamav-clamdscan.service}} - - - path: "/etc/systemd/system/clamav-clamdscan.timer" - permissions: "0644" - content: | - {{clamav-clamdscan.timer}} - - - path: "/opt/guacamole/docker-compose.yaml" - permissions: "0400" - content: | - {{guacamole_docker_compose.mustache.yaml}} - - - path: "/opt/nginx/nginx.conf" - permissions: "0400" - content: | - {{guacamole_nginx_nginx.conf}} - - - path: "/opt/nginx/guacamole.conf" - permissions: "0400" - content: | - {{guacamole_nginx_guacamole.mustache.conf}} - - - path: "/opt/pg-ldap-sync/configuration.yaml" - permissions: "0400" - content: | - {{guacamole_pg_ldap_sync.mustache.yaml}} - - - path: "/opt/pg-ldap-sync/synchronise_database.sh" - permissions: "0500" - content: | - {{guacamole_synchronise_database.sh}} - - - path: "/opt/postgresql/scripts/db_permissions.sql" - permissions: "0444" - content: | - {{guacamole_db_permissions.sql}} - - - path: "/opt/postgresql/scripts/db_update.sql" - permissions: "0444" - content: | - {{guacamole_db_update.mustache.sql}} - - - path: "/opt/postgresql/data/connections.csv" - permissions: "0444" - content: | - SRD Main;{{guacamole.ipAddressFirstSRD}} - - - path: "/opt/configuration/docker_pat.txt" - permissions: "0400" - content: {{shm.dockerPassword}} - -# Set locale and timezone -locale: en_GB.UTF-8 -timezone: {{sre.time.timezone.linux}} - -# Set the NTP server -# By default we use Google's NTP servers which are incompatible with other servers due to leap-second smearing -ntp: - enabled: true - pools: - {{#shm.time.ntp.serverAddresses}} - - {{.}} - {{/shm.time.ntp.serverAddresses}} - -# Configure apt repositories -apt: - preserve_sources_list: true - -# Install necessary apt packages -packages: - - auditd - - clamav - - clamav-base - - clamav-daemon - - clamav-freshclam - - clamav-unofficial-sigs - - docker.io - - docker-compose-v2 - - gcc - - 
libpq-dev - - make - - ruby - - ruby-dev -package_update: true -package_upgrade: true - -# Add the SRE admin (default) and guacamoledaemon users -users: - - default - - name: guacamoledaemon - lock_passwd: true # Lock the password to disable password login - sudo: false # This user will not have sudo privileges - -runcmd: - # Suppress apt prompts and warning messages - - DEBIAN_FRONTEND=noninteractive - - export DEBIAN_FRONTEND - - # Clean up installation - - echo ">=== Cleaning up apt-get packages... ===<" - - apt update - - apt-get -y autoremove - - apt-get clean - - apt --fix-broken install - - # Ensure that auditd is running and enabled at startup - - echo ">=== Enabling auditd services... ===<" - - systemctl start auditd - - systemctl enable auditd - - sleep 20 - - systemctl status auditd - - # Configure ClamAV - - echo ">=== Configure ClamAV... ===<" - # Allow unlimited recursion when scanning - - sed -i 's/^MaxDirectoryRecursion .*/MaxDirectoryRecursion 0/' /etc/clamav/clamd.conf - # Enable ClamAV daemon - - systemctl enable clamav-daemon - # Enable ClamAV scan on access - - systemctl enable clamav-clamonacc - # Enable ClamAV daily scan - - systemctl enable clamav-clamdscan.timer - # Disable ClamAV database update on boot - - systemctl stop clamav-freshclam - - systemctl disable clamav-freshclam - - # Install pg-ldap-sync - - echo ">=== Installing pg-ldap-sync... ===<" - - gem install pg-ldap-sync - - # Ensure that Docker is running and enabled at startup - - echo ">=== Configuring Docker... ===<" - - systemctl enable docker - - systemctl start docker - - sleep 1m - - systemctl status docker - - docker --version - - docker compose version - - # Set up the guacamoledaemon user - - echo ">=== Configuring guacamoledaemon user... ===<" - - groupadd docker 2> /dev/null - - usermod -aG docker guacamoledaemon - - newgrp docker - - chown -R guacamoledaemon:guacamoledaemon /opt/guacamole /opt/nginx /opt/postgresql - - ls -alh /opt/guacamole - - # Generate some self-signed certificates that will later be replaced by real ones - - echo ">=== Creating self-signed SSL certificates... ===<" - - mkdir -p /opt/ssl/conf/live/{{sre.domain.fqdn}} - - openssl rand -out ${HOME}/.rnd -hex 256 - - openssl req -nodes -newkey rsa:2048 -new -x509 -days 1 -keyout /opt/ssl/conf/live/{{sre.domain.fqdn}}/privkey.pem -out /opt/ssl/conf/live/{{sre.domain.fqdn}}/fullchain.pem -subj '/CN=localhost' - - ls -alh /opt/ssl/conf/live/{{sre.domain.fqdn}} - - # Download the Let's Encrypt base certificate - - LETS_ENCRYPT_CERTIFICATE_PATH=/opt/ssl/lets-encrypt-r3.pem - - if [ ! -e $LETS_ENCRYPT_CERTIFICATE_PATH ]; then wget -O $LETS_ENCRYPT_CERTIFICATE_PATH https://letsencrypt.org/certs/lets-encrypt-r3.pem; fi - - # RFC-7919 recommended: https://tools.ietf.org/html/rfc7919 - - echo ">=== Generating 4096-bit server-side Diffie-Hellman key... ===<" - - openssl dhparam -dsaparam -out /opt/ssl/dhparam.pem 4096 - - # Deploy Guacamole using Docker - - echo ">=== Deploying Guacamole with Docker... ===<" - - chown guacamoledaemon:guacamoledaemon /opt/configuration/docker_pat.txt # Ensure that the file is owned by the codimddaemon user - - su guacamoledaemon -c "cat /opt/configuration/docker_pat.txt | docker login --username {{shm.dockerAccount}} --password-stdin" - - su guacamoledaemon -c "docker compose -f /opt/guacamole/docker-compose.yaml up -d" - - # Generate the necessary SQL config for the local PostgreSQL database and run it - - echo ">=== Configuring local PostgreSQL database... 
===<" - - su guacamoledaemon -c "docker compose -f /opt/guacamole/docker-compose.yaml exec -T guacamole /opt/guacamole/bin/initdb.sh --postgresql" > /opt/postgresql/scripts/db_init.sql - - echo "CREATE ROLE ldap_users;" >> /opt/postgresql/scripts/db_init.sql - - echo "CREATE ROLE ldap_groups;" >> /opt/postgresql/scripts/db_init.sql - - ls -alh /opt/postgresql/scripts/db_init.sql - - su guacamoledaemon -c "docker compose -f /opt/guacamole/docker-compose.yaml exec -T postgres psql -U guacamole -f /scripts/db_init.sql" - - # Add initial set of LDAP users with pg-ldap-sync - - echo ">=== Adding LDAP users with pg-ldap-sync... ===<" - - /opt/pg-ldap-sync/synchronise_database.sh - -# Shutdown so that we can tell when the job has finished by polling the VM state -power_state: - mode: poweroff - message: "Shutting down as a signal that setup is finished" - timeout: 30 - condition: true diff --git a/deployment/secure_research_environment/cloud_init/cloud-init-postgres.mustache.yaml b/deployment/secure_research_environment/cloud_init/cloud-init-postgres.mustache.yaml deleted file mode 100644 index cdbbb62b41..0000000000 --- a/deployment/secure_research_environment/cloud_init/cloud-init-postgres.mustache.yaml +++ /dev/null @@ -1,341 +0,0 @@ -#cloud-config - -# Update package database on first boot (ie. run `apt-get update`) -package_update: true - -# Upgrade installed packages on first boot (ie. run `apt-get upgrade`) -package_upgrade: true - -apt: - # Preserves the existing /etc/apt/sources.list - preserve_sources_list: true - - # Add repositories - sources: - postgresql.list: - source: "deb http://apt.postgresql.org/pub/repos/apt/ focal-pgdg main" - keyid: 7FCC7D46ACCC4CF8 - -# We know that exactly one data disk will be attached to this VM and it will therefore use lun0 -disk_setup: - /dev/disk/azure/scsi1/lun0: - table_type: gpt - layout: true - overwrite: true - -fs_setup: - - device: /dev/disk/azure/scsi1/lun0 - partition: auto - filesystem: ext4 - -mounts: - - [/dev/disk/azure/scsi1/lun0-part1, /data, ext4, "defaults,nofail"] - -write_files: - - path: "/etc/apt/apt.conf.d/00proxy" - permissions: "0444" - content: | - Acquire::http::Proxy "http://{{shm.monitoring.updateServers.linux.ip}}:8000"; - - - path: "/etc/audit/rules.d/audit.rules" - permissions: "0400" - content: | - {{audit.rules}} - - - path: "/etc/clamav/clamd.conf" - permissions: "0644" - append: true - content: | - {{clamd.conf}} - - - path: "/etc/cron.d/clamav-freshclam" - permissions: "0644" - content: | - # Run every day at a fixed time - {{shm.monitoring.updateServers.schedule.daily_definition_updates.minute}} {{shm.monitoring.updateServers.schedule.daily_definition_updates.hour}} * * * freshclam - - - path: "/etc/cron.d/ldap-db-sync" - permissions: "0644" - content: | - # Run user synchronisation every 10 minutes - */10 * * * * postgres /usr/local/bin/pg_ldap_sync -vvv -c /etc/postgresql/12/main/pg-ldap-sync.yaml 2>&1 | logger -t SafeHaven - - - path: "/etc/cron.d/db-role-update" - permissions: "0644" - content: | - # Trigger role updates every 10 minutes by writing a comment to the data schema - # Run at 1, 11, 21, 31, 41, 51 minutes past the hour so always after the user sync - 1-59/10 * * * * postgres /usr/bin/psql -q -c "comment on schema data is 'Data schema can only be modified by Data Admins or System Admins';" - - - path: "/etc/cron.d/runonce" - permissions: "0644" - content: | - # Run on every reboot - @reboot root /usr/local/bin/runonce | logger -t SafeHaven - - - path: "/etc/domain-join.secret" - permissions: "0400" 
- content: | - {{{postgres.domainJoinPassword}}} - - - path: "/etc/krb5.conf" - permissions: "0444" - content: | - {{krb5.mustache.conf}} - - - path: "/etc/ldap.secret" - permissions: "0400" - content: | - {{{postgres.ldapSearchUserPassword}}} - - - path: "/etc/local/runonce.d/00-join-domain.sh" - permissions: "0500" - content: | - #! /bin/bash - echo ">=== Joining domain '{{shm.domain.fqdnLower}}'... ===<" - while [ "$(/usr/sbin/realm discover {{shm.domain.fqdnLower}} 2> /dev/null | grep server-software | awk '{print $2}')" != "active-directory" ]; do echo "Waiting for realm availability..."; sleep 1; done - echo "/opt/configuration/join_domain.sh '{{shm.domain.fqdnLower}}' '{{shm.domain.ous.databaseServers.path}}' '{{shm.users.computerManagers.databaseServers.samAccountName}}' '{{postgres.vmName}}'" - /opt/configuration/join_domain.sh "{{shm.domain.fqdnLower}}" "{{shm.domain.ous.databaseServers.path}}" "{{shm.users.computerManagers.databaseServers.samAccountName}}" "{{postgres.vmName}}" - - - path: "/etc/local/runonce.d/01-generate-keytab.sh" - permissions: "0500" - content: | - {{postgres_generate_keytab.mustache.sh}} - - - path: "/etc/local/runonce.d/02-initial-ldap-sync.sh" - permissions: "0500" - content: | - {{postgres_initial_ldap_sync.sh}} - - - path: "/etc/local/runonce.d/03-set-public-schema-permissions.sh" - permissions: "0500" - content: | - {{postgres_set_public_schema_permissions.sh}} - - - path: "/etc/local/runonce.d/04-list-postgres-users.sh" - permissions: "0500" - content: | - {{postgres_list_postgres_users.sh}} - - - path: "/etc/postgres-user.secret" - permissions: "0600" - content: | - {{{postgres.dbAdminPassword}}} - - - path: "/etc/postgres-service-account.secret" - permissions: "0600" - content: | - {{{postgres.dbServiceAccountPassword}}} - - - path: "/etc/realmd.conf" - permissions: "0400" - content: | - {{realmd.mustache.conf}} - - - path: "/etc/systemd/system/clamav-clamonacc.service" - permissions: "0644" - content: | - {{clamav-clamonacc.service}} - - - path: "/etc/systemd/system/clamav-clamdscan.service" - permissions: "0644" - content: | - {{clamav-clamdscan.service}} - - - path: "/etc/systemd/system/clamav-clamdscan.timer" - permissions: "0644" - content: | - {{clamav-clamdscan.timer}} - - - path: "/opt/configuration/check_dns.sh" - permissions: "0500" - content: | - {{check_dns.sh}} - - - path: "/opt/configuration/check_hostname.sh" - permissions: "0500" - content: | - {{check_hostname.sh}} - - - path: "/opt/configuration/check_ntp.sh" - permissions: "0500" - content: | - {{check_ntp.sh}} - - - path: "/opt/configuration/create-postgres-triggers.sql" - permissions: "0444" - content: | - {{postgres_create_postgres_triggers.mustache.sql}} - - - path: "/opt/configuration/install-postgres-extensions.sql" - permissions: "0444" - content: | - {{postgres_install_postgres_extensions.sql}} - - - path: "/opt/configuration/join_domain.sh" - permissions: "0500" - content: | - {{join_domain.sh}} - - - path: "/opt/configuration/pg-ldap-sync.yaml" - permissions: "0700" - content: | - {{postgres_pg_ldap_sync.mustache.yaml}} - - - path: "/usr/local/bin/runonce" - permissions: "0500" - content: | - {{runonce.sh}} - -# List of packages to install with apt-get -packages: - - adcli - - auditd - - clamav - - clamav-base - - clamav-daemon - - clamav-freshclam - - clamav-unofficial-sigs - - gcc - - krb5-user - - ldap-utils - - libpq-dev - - make - - postgresql-12 - - postgresql-12-pgrouting - - postgresql-12-postgis-3-scripts - - postgresql-contrib-12 - - realmd - - ruby - - 
ruby-dev - - sssd - -# Set locale and timezone -locale: en_GB.UTF-8 -timezone: {{sre.time.timezone.linux}} - -# Set the NTP server -# By default we use Google's NTP servers which are incompatible with other servers due to leap-second smearing -ntp: - enabled: true - pools: - {{#shm.time.ntp.serverAddresses}} - - {{.}} - {{/shm.time.ntp.serverAddresses}} - - -# Set hostname -# We need the FQDN in the hostname as without it we cannot set service principals when joining the Windows domain -fqdn: "{{postgres.vmName}}.{{shm.domain.fqdnLower}}" -hostname: {{postgres.vmName}} -manage_etc_hosts: true -prefer_fqdn_over_hostname: true - -runcmd: - # Suppress apt prompts and warning messages - - DEBIAN_FRONTEND=noninteractive - - export DEBIAN_FRONTEND - - # Clean up installation - - echo ">=== Cleaning up apt-get packages... ===<" - - apt update - - apt-get -y autoremove - - apt-get clean - - apt --fix-broken install - - # Ensure that auditd is running and enabled at startup - - echo ">=== Enabling auditd services... ===<" - - systemctl start auditd - - systemctl enable auditd - - sleep 20 - - systemctl status auditd - - # Configure ClamAV - - echo ">=== Configure ClamAV... ===<" - # Allow unlimited recursion when scanning - - sed -i 's/^MaxDirectoryRecursion .*/MaxDirectoryRecursion 0/' /etc/clamav/clamd.conf - # Enable ClamAV daemon - - systemctl enable clamav-daemon - # Enable ClamAV scan on access - - systemctl enable clamav-clamonacc - # Enable ClamAV daily scan - - systemctl enable clamav-clamdscan.timer - # Disable ClamAV database update on boot - - systemctl stop clamav-freshclam - - systemctl disable clamav-freshclam - - # Configure services for realm joining - - echo ">=== Configuring domain joining settings... ===<" - - OSVERSION=$(lsb_release -a 2> /dev/null | grep "Description:" | awk '{print $3}') - - sed -i -E "s|(os-version = ).*|\1$OSVERSION|" /etc/realmd.conf - # Check DNS - - echo "Checking DNS..." - - /opt/configuration/check_dns.sh - # Check hostname - - echo "Checking hostname..." - - /opt/configuration/check_hostname.sh - - systemctl enable sssd - # Check Kerberos settings - - echo "Checking Kerberos config" - - grep -v -e '^[[:space:]]*$' /etc/krb5.conf | sed 's|^| /etc/krb5.conf |' - # Check timezone and NTP server - - echo "Current time synchronisation status..." - - /opt/configuration/check_ntp.sh - - # Configure the postgres database - - echo ">=== Configure postgres database... ===<" - # Change the password for the postgres user - - sudo -i -u postgres psql -tc "ALTER USER postgres WITH PASSWORD '$(cat /etc/postgres-user.secret)';" - # Use external disk for storing data - - systemctl stop postgresql@12-main - - mkdir -p /data/postgresql - - chmod 0700 /data/postgresql - - chown -R postgres:postgres /data/ - - mv /var/lib/postgresql/12/main/* /data/postgresql - - sed -i "s|data_directory = .*|data_directory = '/data/postgresql'|g" /etc/postgresql/12/main/postgresql.conf - - grep data_directory /etc/postgresql/12/main/postgresql.conf - # Allow login to this database from other VMs - - sed -i "s|#listen_addresses|listen_addresses = '*'\n#listen_addresses|g" /etc/postgresql/12/main/postgresql.conf - - grep listen_addresses /etc/postgresql/12/main/postgresql.conf - # Restart the server with all the above changes - - systemctl start postgresql@12-main - - systemctl status postgresql@12-main - - # Create the required LDAP groups on the postgres server - - echo ">=== Creating necessary LDAP roles... 
===<" - - | - for ROLE in "ldap_users" "ldap_groups"; do - ROLE_EXISTS=$(sudo -i -u postgres psql -tc "SELECT 1 FROM pg_roles WHERE rolname='${ROLE}'" | grep 1) - if [ "$ROLE_EXISTS" = "" ]; then - echo "Creating ${ROLE} role" - sudo -i -u postgres psql -q -c "CREATE ROLE ${ROLE};" - else - echo "${ROLE} role already exists" - fi - done - - # Install postgres extensions - - echo ">=== Installing postgis schema and extensions... ===<" - - sudo -i -u postgres psql -f /opt/configuration/install-postgres-extensions.sql - - # Install pg-ldap-sync for adding LDAP users - - echo ">=== Configuring pg-ldap-sync... ===<" - # Add users - - echo "Adding LDAP users with pg-ldap-sync..." - - gem install pg-ldap-sync - - mv /opt/configuration/pg-ldap-sync.yaml /etc/postgresql/12/main/pg-ldap-sync.yaml - - chown postgres:postgres /etc/postgresql/12/main/pg-ldap-sync.yaml - - # Allow Kerberos login - disable all other login types except the dbadmin on localhost - - echo ">=== Restricting login types... ===<" - - echo "local all postgres peer\nhost all all {{sre.network.vnet.subnets.compute.cidr}} gss include_realm=0 krb_realm={{shm.domain.fqdnUpper}}" > /etc/postgresql/12/main/pg_hba.conf - - cat /etc/postgresql/12/main/pg_hba.conf - -final_message: "System setup through cloud-init is finished. Configuration took $UPTIME seconds" - -# Shutdown so that we can tell when the job has finished by polling the VM state -power_state: - mode: poweroff - message: "Shutting down as a signal that setup is finished" - timeout: 30 - condition: true diff --git a/deployment/secure_research_environment/cloud_init/cloud-init-srd.mustache.yaml b/deployment/secure_research_environment/cloud_init/cloud-init-srd.mustache.yaml deleted file mode 100644 index 52bac4e00f..0000000000 --- a/deployment/secure_research_environment/cloud_init/cloud-init-srd.mustache.yaml +++ /dev/null @@ -1,611 +0,0 @@ -#cloud-config - -# Upgrade installed packages on first boot (ie. run `apt-get upgrade`) -package_upgrade: false - -disk_setup: - /dev/disk/azure/scsi1/lun0: - table_type: gpt - layout: true - overwrite: true - -fs_setup: - - device: /dev/disk/azure/scsi1/lun0 - filesystem: ext4 - partition: auto - -# Note that we do not include the blobfuse mounts here as these are controlled by systemd -mounts: - - ["/dev/disk/azure/scsi1/lun0-part1", /scratch, ext4, "rw,user,exec"] - - ["{{sre.storage.userdata.account.name}}.file.core.windows.net:/{{sre.storage.userdata.account.name}}/shared", /shared, nfs, "_netdev,sec=sys"] - - ["{{sre.storage.userdata.account.name}}.file.core.windows.net:/{{sre.storage.userdata.account.name}}/home", /home, nfs, "_netdev,sec=sys"] - -write_files: - - path: "/etc/apt/apt.conf.d/00proxy" - permissions: "0444" - content: | - Acquire::http::Proxy "http://{{shm.monitoring.updateServers.linux.ip}}:8000"; - - - path: "/etc/audit/rules.d/audit.rules" - permissions: "0400" - content: | - {{audit.rules}} - - - path: "/etc/bash.bashrc" - permissions: "0644" - append: true - content: | - # enable bash completion in interactive shells - if [ ! $(shopt -oq posix) ]; then - if [ -f /usr/share/bash-completion/bash_completion ]; then - . /usr/share/bash-completion/bash_completion - elif [ -f /etc/bash_completion ]; then - . 
/etc/bash_completion - fi - fi - - - path: "/etc/clamav/clamd.conf" - permissions: "0644" - append: true - content: | - # Scan all local and remote mounts on access - OnAccessIncludePath /data - OnAccessIncludePath /home - OnAccessIncludePath /mnt - OnAccessIncludePath /output - OnAccessIncludePath /scratch - OnAccessIncludePath /shared - # Prevent access to infected files - OnAccessPrevention yes - OnAccessExcludeUname clamav - OnAccessExcludeRootUID yes - - - path: "/etc/cron.d/clamav-freshclam" - permissions: "0644" - content: | - # Run every day at a fixed time - {{shm.monitoring.updateServers.schedule.daily_definition_updates.minute}} {{shm.monitoring.updateServers.schedule.daily_definition_updates.hour}} * * * freshclam - - - path: "/etc/cron.d/runonce" - permissions: "0644" - content: | - # Run on every reboot - @reboot root /usr/local/bin/runonce | logger -t SafeHaven - - - path: "/etc/domain-join.secret" - permissions: "0400" - content: | - {{{srd.domainJoinPassword}}} - - - path: "/etc/jaas.conf" - permissions: "0444" - content: | - pgjdbc { - com.sun.security.auth.module.Krb5LoginModule required - useTicketCache=true - debug=true - renewTGT=true - doNotPrompt=true; - }; - - - path: "/etc/ldap.conf" - permissions: "0444" - content: | - # The distinguished name of the search base. - base {{shm.domain.ous.researchUsers.path}} - # Restrict users to those in the security group of this SRE - filter {{{srd.ldapUserFilter}}} - # Specify the LDAP server by URI - uri ldap://{{shm.dc.hostnameUpper}}.{{shm.domain.fqdnLower}}:389 - # The LDAP version to use (defaults to 3 if supported by client library) - ldap_version 3 - # The distinguished name used to bind to the server - rootbinddn {{srd.ldapSearchUserDn}} - # Do not hash the password: rely on the server to do so (default) - pam_password md5 - - - path: "/etc/ldap.secret" - permissions: "0400" - content: | - {{{srd.ldapSearchUserPassword}}} - - - path: "/etc/local/runonce.d/00-join-domain.sh" - permissions: "0500" - content: | - #! /bin/bash - echo ">=== Joining domain '{{shm.domain.fqdnLower}}'... ===<" - while [ "$(/usr/sbin/realm discover {{shm.domain.fqdnLower}} 2> /dev/null | grep server-software | awk '{print $2}')" != "active-directory" ]; do echo "Waiting for realm availability..."; sleep 1; done - echo "/opt/configuration/join_domain.sh '{{shm.domain.fqdnLower}}' '{{shm.domain.ous.linuxServers.path}}' '{{shm.users.computerManagers.linuxServers.samAccountName}}' '{{srd.hostname}}'" - /opt/configuration/join_domain.sh "{{shm.domain.fqdnLower}}" "{{shm.domain.ous.linuxServers.path}}" "{{shm.users.computerManagers.linuxServers.samAccountName}}" "{{srd.hostname}}" - - - path: "/etc/local/runonce.d/01-remount-nfs.sh" - permissions: "0500" - content: | - #! /bin/bash - echo ">=== Re-mounting nfs after domain join... 
===<" - systemctl start home.mount - systemctl start shared.mount - - - path: "/etc/krb5.conf" - permissions: "0444" - content: | - {{krb5.mustache.conf}} - - - path: "/etc/pip.conf" - permissions: "0444" - content: | - # Add the PyPI mirror to our global settings - [global] - index = {{sre.repositories.pypi.index}} - index-url = {{sre.repositories.pypi.indexUrl}} - trusted-host = {{sre.repositories.pypi.host}} - - - path: "/etc/realmd.conf" - permissions: "0400" - content: | - {{realmd.mustache.conf}} - - - path: "/etc/R/Rprofile.site" - permissions: "0444" - content: | - ## Set Rprofile.site to the appropriate CRAN mirror - local({ - r <- getOption("repos") - r["CRAN"] <- "{{sre.repositories.cran.url}}" - options(repos = r) - }) - - - path: "/etc/skel/Desktop/CodiMD.desktop" - permissions: "0755" - content: | - [Desktop Entry] - Version=1.0 - Type=Link - Name=CodiMD - Comment= - Icon=/opt/icons/codimd.png - URL=http://{{sre.webapps.codimd.fqdn}} - - - path: "/etc/skel/Desktop/GitLab.desktop" - permissions: "0755" - content: | - [Desktop Entry] - Version=1.0 - Type=Link - Name=GitLab - Comment= - Icon=/opt/icons/gitlab.png - URL=http://{{sre.webapps.gitlab.fqdn}} - - - path: "/etc/skel/.bashrc" - permissions: "0644" - content: | - # determine if terminal has color support - case "$TERM" in - xterm-color|*-256color) color_prompt=yes;; - esac - # set prompt - if [ "$color_prompt" = yes ]; then - PS1='\[\033[01;32m\]\u\[\033[00m\]:\[\033[01;34m\]\W\[\033[00m\]\$ ' - else - PS1='\u:\W\$ ' - fi - unset color_prompt - # enable color support for ls and grep - if [ -x /usr/bin/dircolors ]; then - test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)" - alias ls='ls --color=auto' - alias grep='grep --color=auto' - alias fgrep='fgrep --color=auto' - alias egrep='egrep --color=auto' - fi - - - path: "/etc/skel/.config/JetBrains/PYCHARM_VERSION/options/project.default.xml" - permissions: "0400" - content: | - {{project.default.xml}} - - - path: "/etc/skel/.xsession" - permissions: "0444" - content: | - xfce4-session - - - path: "/etc/systemd/system/clamav-clamonacc.service" - permissions: "0644" - content: | - {{clamav-clamonacc.service}} - - - path: "/etc/systemd/system/clamav-clamdscan.service" - permissions: "0644" - content: | - {{clamav-clamdscan.service}} - - - path: "/etc/systemd/system/clamav-clamdscan.timer" - permissions: "0644" - content: | - {{clamav-clamdscan.timer}} - - - path: "/etc/systemd/system/backup.mount" - permissions: "0644" - content: | - [Unit] - Description=Mount unit for backup volume (via blobfuse) - Requires=network-online.target - After=network-online.target - - [Mount] - What=/opt/configuration/mount-backup.sh - Where=/backup - Type=fuse - Options=_netdev - - [Install] - WantedBy=network-online.target - - - path: "/etc/systemd/system/backup.automount" - permissions: "0644" - content: | - [Unit] - Description=Automount blobfuse - ConditionPathExists=/backup - - [Automount] - Where=/backup - TimeoutIdleSec=10 - - [Install] - WantedBy=multi-user.target - - - path: "/etc/systemd/system/data.mount" - permissions: "0644" - content: | - [Unit] - Description=Mount unit for ingress volume (via blobfuse) - Requires=network-online.target - After=network-online.target - - [Mount] - What=/opt/configuration/mount-ingress.sh - Where=/data - Type=fuse - Options=_netdev - - [Install] - WantedBy=network-online.target - - - path: "/etc/systemd/system/data.automount" - permissions: "0644" - content: | - [Unit] - Description=Automount blobfuse - 
ConditionPathExists=/data - - [Automount] - Where=/data - TimeoutIdleSec=10 - - [Install] - WantedBy=multi-user.target - - - path: "/etc/systemd/system/output.mount" - permissions: "0644" - content: | - [Unit] - Description=Mount unit for egress volume (via blobfuse) - Requires=network-online.target - After=network-online.target - - [Mount] - What=/opt/configuration/mount-egress.sh - Where=/output - Type=fuse - Options=_netdev - - [Install] - WantedBy=network-online.target - - - path: "/etc/systemd/system/output.automount" - permissions: "0644" - content: | - [Unit] - Description=Automount blobfuse - ConditionPathExists=/output - - [Automount] - Where=/output - TimeoutIdleSec=10 - - [Install] - WantedBy=multi-user.target - - - path: "/etc/xdg/xfce4/terminal/terminalrc" - permissions: "0644" - content: | - [Configuration] - - - path: "/etc/xrdp/startwm.sh" - permissions: "0755" - content: | - #!/bin/sh - if [ -r /etc/default/locale ]; then - . /etc/default/locale - export LANG LANGUAGE - fi - # Start xfce4 - startxfce4 - - - path: "/opt/configuration/check_dns.sh" - permissions: "0500" - content: | - {{check_dns.sh}} - - - path: "/opt/configuration/check_hostname.sh" - permissions: "0500" - content: | - {{check_hostname.sh}} - - - path: "/opt/configuration/check_ntp.sh" - permissions: "0500" - content: | - {{check_ntp.sh}} - - - path: "/opt/configuration/configure_pam.sh" - permissions: "0500" - content: | - # Edit the PAM session configuration file - PAM_INFORMATION="session required|pam_mkhomedir.so|skel=/etc/skel/|umask=0022" - sed "/pam_unix/ a $PAM_INFORMATION" /etc/pam.d/common-session | tr "|" "\t" > /etc/pam.d/common-session.tmp - mv /etc/pam.d/common-session.tmp /etc/pam.d/common-session - grep -v -e '^[[:space:]]*$' /etc/pam.d/common-session | grep -v "^#" | sed 's|^| /etc/pam.d/common-session |' - - - path: "/opt/configuration/configure_pycharm.py" - permissions: "0500" - content: | - {{configure_pycharm.py}} - - - path: "/opt/configuration/credentials-backup.secret" - permissions: "0400" - content: | - accountName {{sre.storage.persistentdata.account.name}} - sasToken {{{srd.backupContainerSasToken}}} - authType SAS - containerName backup - - - path: "/opt/configuration/credentials-egress.secret" - permissions: "0400" - content: | - accountName {{sre.storage.persistentdata.account.name}} - sasToken {{{srd.egressContainerSasToken}}} - authType SAS - containerName egress - - - path: "/opt/configuration/credentials-ingress.secret" - permissions: "0400" - content: | - accountName {{sre.storage.persistentdata.account.name}} - sasToken {{{srd.ingressContainerSasToken}}} - authType SAS - containerName ingress - - - path: "/opt/configuration/jdk-template.xml" - permissions: "0400" - content: | - {{jdk-template.xml}} - - - path: "/opt/configuration/join_domain.sh" - permissions: "0500" - content: | - {{join_domain.sh}} - - - path: "/opt/configuration/mount-backup.sh" - permissions: "0500" - content: | - if [ ! 
"$(df -h | grep $1)" ]; then - BLOBFUSE_CACHE_DIR="/tmp/blobfuse-cache-backup" - rm -rf $BLOBFUSE_CACHE_DIR - mkdir -p $BLOBFUSE_CACHE_DIR - CACHE_SPACE_MB=$(echo "$(findmnt -nb -o size /) / 1024^2 / 50" | bc) # set the cache size to 2% of the OS disk size - /usr/bin/blobfuse $1 -o rw --tmp-path=$BLOBFUSE_CACHE_DIR --cache-size-mb=$CACHE_SPACE_MB --no-symlinks=true --config-file=/opt/configuration/credentials-backup.secret --log-level=LOG_DEBUG -o attr_timeout=240 -o entry_timeout=240 -o negative_timeout=120 -o allow_other - fi - - - path: "/opt/configuration/mount-egress.sh" - permissions: "0500" - content: | - if [ ! "$(df -h | grep $1)" ]; then - BLOBFUSE_CACHE_DIR="/tmp/blobfuse-cache-egress" - rm -rf $BLOBFUSE_CACHE_DIR - mkdir -p $BLOBFUSE_CACHE_DIR - CACHE_SPACE_MB=$(echo "$(findmnt -nb -o size /) / 1024^2 / 50" | bc) # set the cache size to 2% of the OS disk size - /usr/bin/blobfuse $1 -o rw --tmp-path=$BLOBFUSE_CACHE_DIR --cache-size-mb=$CACHE_SPACE_MB --no-symlinks=true --config-file=/opt/configuration/credentials-egress.secret --log-level=LOG_DEBUG -o attr_timeout=240 -o entry_timeout=240 -o negative_timeout=120 -o allow_other - fi - - - path: "/opt/configuration/mount-ingress.sh" - permissions: "0500" - content: | - if [ ! "$(df -h | grep $1)" ]; then - BLOBFUSE_CACHE_DIR="/tmp/blobfuse-cache-ingress" - rm -rf $BLOBFUSE_CACHE_DIR - mkdir -p $BLOBFUSE_CACHE_DIR - CACHE_SPACE_MB=$(echo "$(findmnt -nb -o size /) / 1024^2 / 50" | bc) # set the cache size to 2% of the OS disk size - /usr/bin/blobfuse $1 -o ro --tmp-path=$BLOBFUSE_CACHE_DIR --cache-size-mb=$CACHE_SPACE_MB --no-symlinks=true --config-file=/opt/configuration/credentials-ingress.secret --log-level=LOG_DEBUG -o attr_timeout=240 -o entry_timeout=240 -o negative_timeout=120 -o allow_other - fi - - - path: "/usr/local/bin/runonce" - permissions: "0500" - content: | - {{runonce.sh}} - - - path: "/usr/share/xrdp/xrdp_custom_logo.bmp" - encoding: gz+b64 - permissions: "0644" - content: | - {{srd.xrdpCustomLogoEncoded}} - -# Set locale and timezone -locale: en_GB.UTF-8 -timezone: {{sre.time.timezone.linux}} - -# Set the NTP server -# By default we use Google's NTP servers which are incompatible with other servers due to leap-second smearing -ntp: - enabled: true - pools: - {{#shm.time.ntp.serverAddresses}} - - {{.}} - {{/shm.time.ntp.serverAddresses}} - -# Set hostname -# We need the FQDN in the hostname as without it we cannot set service principals when joining the Windows domain -fqdn: "{{srd.hostname}}.{{shm.domain.fqdnLower}}" -hostname: {{srd.hostname}} -manage_etc_hosts: true -prefer_fqdn_over_hostname: true - -runcmd: - - echo ">=== Beginning SRD configuration... ===<" - - # Clean up installation - - echo ">=== Cleaning up apt-get packages... ===<" - - apt update - - apt-get -y autoremove - - apt-get clean - - apt --fix-broken install - - # Set permissions on scratch directory - - chmod ugo+rwx /scratch - - # Ensure that auditd is running and enabled at startup - - echo ">=== Enabling auditd services... ===<" - - systemctl start auditd - - systemctl enable auditd - - sleep 20 - - systemctl status auditd - - # Configure ClamAV - - echo ">=== Configure ClamAV... 
===<" - # Allow unlimited recursion when scanning - - sed -i 's/^MaxDirectoryRecursion .*/MaxDirectoryRecursion 0/' /etc/clamav/clamd.conf - # Enable ClamAV daemon - - systemctl enable clamav-daemon - # Enable ClamAV scan on access - - systemctl enable clamav-clamonacc - # Enable ClamAV daily scan - - systemctl enable clamav-clamdscan.timer - # Disable ClamAV database update on boot - - systemctl stop clamav-freshclam - - systemctl disable clamav-freshclam - - # Configure services for realm joining - - echo ">=== Configuring domain joining settings... ===<" - - OSVERSION=$(lsb_release -a 2> /dev/null | grep "Description:" | awk '{print $3}') - - sed -i -E "s|(os-version = ).*|\1$OSVERSION|" /etc/realmd.conf - - grep -v -e '^[[:space:]]*$' /etc/realmd.conf | grep -v "^#" | sed 's|^| /etc/realmd.conf |' - - - echo ">=== Checking DNS... ===<" - - /opt/configuration/check_dns.sh - - - echo ">=== Checking hostname... ===<" - - /opt/configuration/check_hostname.sh - - - echo ">=== Updating PAM configuration... ===<" - - /opt/configuration/configure_pam.sh - - systemctl start sssd - - systemctl enable sssd - - sleep 20 - - systemctl status sssd - - # Check Kerberos settings - - echo ">=== Checking LDAP config... ===<" - - grep -v -e '^[[:space:]]*$' /etc/ldap.conf | grep -v "^#" | sed 's|^| /etc/ldap.conf |' - - echo ">=== Checking Kerberos config... ===<" - - grep -v -e '^[[:space:]]*$' /etc/krb5.conf | sed 's|^| /etc/krb5.conf |' - - # Check timezone and NTP server - - echo ">=== Current time synchronisation status... ===<" - - /opt/configuration/check_ntp.sh - - # PyCharm configuration - - echo ">=== Configuring PyCharm... ===<" - - PYCHARM_VERSION=$(grep "dataDirectoryName" /snap/pycharm-community/current/product-info.json | cut -d':' -f2 | xargs | sed "s/,//") - - python3 /opt/configuration/configure_pycharm.py /etc/skel/.config/JetBrains/PYCHARM_VERSION/options/jdk.table.xml - - mv /etc/skel/.config/JetBrains/PYCHARM_VERSION /etc/skel/.config/JetBrains/${PYCHARM_VERSION} - - # Set default keyboard to a generic 105 key en-GB - - echo ">=== Configuring keyboard... ===<" - - sed -i 's|XKBMODEL=.*|XKBMODEL=\"pc105\"|g' /etc/default/keyboard - - sed -i 's|XKBLAYOUT=.*|XKBLAYOUT=\"gb\"|g' /etc/default/keyboard - - grep -v -e '^[[:space:]]*$' /etc/default/keyboard | sed 's|^| /etc/default/keyboard |' - - # Set default terminal and panel for xfce - - echo ">=== Setting xfce default panel and terminal and disabling screen lock... ===<" - - sed -i -E 's/(TerminalEmulator=).*/\1xfce4-terminal/' /etc/xdg/xfce4/helpers.rc - - cp /etc/xdg/xfce4/panel/default.xml /etc/xdg/xfce4/xfconf/xfce-perchannel-xml/xfce4-panel.xml - # Disable light-locker which can cause irritating error messages - - echo "Hidden=true" >> /etc/xdg/autostart/light-locker.desktop - # Ensure that if xscreensave is installed it will use blank mode - - | - echo "xscreensaver.mode: blank" >> /etc/X11/Xresources/x11-common - # Set terminal colour scheme - - | - THEME="dark-pastels" - if [ -f /usr/share/xfce4/terminal/colorschemes/${THEME}.theme ]; then - echo "Setting terminal colour scheme to $THEME" - grep -v -e Name -e Scheme /usr/share/xfce4/terminal/colorschemes/${THEME}.theme >> /etc/xdg/xfce4/terminal/terminalrc - else - echo "Could not find terminal colour scheme $THEME" - rm -rf /etc/xdg/xfce4/terminal/terminalrc - fi - - # Configure xrdp login window - - echo ">=== Setting xrdp defaults... 
===<" - - sed -i "s|dark_grey=808080|dark_grey=ffffff|g" /etc/xrdp/xrdp.ini # title bar colour (unselected) - - sed -i "s|blue=009cb5|blue=ffffff|g" /etc/xrdp/xrdp.ini # title bar colour (selected) - - sed -i "s|ls_top_window_bg_color=.*|ls_top_window_bg_color=000000|g" /etc/xrdp/xrdp.ini # teal background colour - - sed -i "s|ls_bg_color=.*|ls_bg_color=ffffff|g" /etc/xrdp/xrdp.ini # grey box colour - - sed -i "s|.*ls_title=.*|ls_title=\.|g" /etc/xrdp/xrdp.ini # window title - - sed -i "s|ls_logo_filename=.*|ls_logo_filename=/usr/share/xrdp/xrdp_custom_logo.bmp|g" /etc/xrdp/xrdp.ini # logo - - sed -i "s|ls_btn_ok_x_pos=.*|ls_btn_ok_x_pos=85|g" /etc/xrdp/xrdp.ini # centre OK button - - sed -i "s|ls_btn_cancel_x_pos=.*|ls_btn_cancel_x_pos=180|g" /etc/xrdp/xrdp.ini # centre cancel button - # Remove all sessions except Xorg - - sed -i '/\[X11rdp\]/,/^$/d' /etc/xrdp/xrdp.ini # delete lines from [X11rdp] until next empty line - - sed -i '/\[Xvnc\]/,/^$/d' /etc/xrdp/xrdp.ini # delete lines from [Xvnc] until next empty line - - sed -i '/\[console\]/,/^$/d' /etc/xrdp/xrdp.ini # delete lines from [console] until next empty line - - sed -i '/\[vnc-any\]/,/^$/d' /etc/xrdp/xrdp.ini # delete lines from [vnc-any] until next empty line - - sed -i '/\[sesman-any\]/,/^$/d' /etc/xrdp/xrdp.ini # delete lines from [sesman-any] until next empty line - - sed -i '/\[neutrinordp-any\]/,/^$/d' /etc/xrdp/xrdp.ini # delete lines from [neutrinordp-any] until next empty line - - grep -v -e '^[[:space:]]*$' /etc/xrdp/xrdp.ini | grep -v -e '^[;#]' | sed 's|^| /etc/xrdp/xrdp.ini |' - - # Ensure that xrdp is running and enabled at startup - - echo ">=== Enabling xrdp services... ===<" - - systemctl enable xrdp - - systemctl enable xrdp-sesman - - # Schedule mounting of data and output volumes, allowing non-root users to specify 'allow_other' - - echo ">=== Configure ingress, egress and backup mounts... ===<" - - grep -v "user_allow_other" /etc/fuse.conf > /etc/fuse.conf.tmp - - echo "user_allow_other" >> /etc/fuse.conf.tmp - - mv /etc/fuse.conf.tmp /etc/fuse.conf - - systemctl enable backup.mount - - systemctl enable data.mount - - systemctl enable output.mount - - systemctl enable backup.automount - - systemctl enable data.automount - - systemctl enable output.automount - - # Use tmpfs so tmp is in memory where possible rather than entirely on disk - - echo ">=== Configure tmpfs... ===<" - - echo "tmpfs /tmp tmpfs rw,nosuid,nodev,noexec" | tee -a /etc/fstab - - # List fstab and the volume mount order. Note that blobfuse mounts are not using fstab - - echo ">=== Checking disk mounts... ===<" - - grep -v -e '^[[:space:]]*$' /etc/fstab | sed 's|^| /etc/fstab |' - - mount -fav 2> /dev/null - -final_message: "System setup through cloud-init is finished. Configuration took $UPTIME seconds" - -# Shutdown so that we can tell when the job has finished by polling the VM state -power_state: - mode: poweroff - message: "Shutting down as a signal that setup is finished" - timeout: 30 - condition: true diff --git a/deployment/secure_research_environment/cloud_init/resources/check_dns.sh b/deployment/secure_research_environment/cloud_init/resources/check_dns.sh deleted file mode 100644 index f45630634e..0000000000 --- a/deployment/secure_research_environment/cloud_init/resources/check_dns.sh +++ /dev/null @@ -1,15 +0,0 @@ -#! 
/bin/bash - -# Add systemd-resolved to list of services to start on boot -systemctl enable systemd-resolved - -# Restart systemd networking -systemctl daemon-reload -systemctl restart systemd-networkd -systemctl restart systemd-resolved - -# Output current settings -grep -v -e '^[[:space:]]*$' /etc/resolv.conf | grep -v "^#" | sed 's|^| /etc/resolv.conf |' - -# Check DNS settings -systemd-resolve --status diff --git a/deployment/secure_research_environment/cloud_init/resources/check_hostname.sh b/deployment/secure_research_environment/cloud_init/resources/check_hostname.sh deleted file mode 100644 index 3e48156a07..0000000000 --- a/deployment/secure_research_environment/cloud_init/resources/check_hostname.sh +++ /dev/null @@ -1,7 +0,0 @@ -#! /bin/bash - -# Check /etc/hostname -grep -v -e '^[[:space:]]*$' /etc/hostname | grep -v "^#" | sed 's|^| /etc/hostname |' - -# Check /etc/hosts -grep -v -e '^[[:space:]]*$' /etc/hosts | grep -v "^#" | sed 's|^| /etc/hosts |' diff --git a/deployment/secure_research_environment/cloud_init/resources/check_ntp.sh b/deployment/secure_research_environment/cloud_init/resources/check_ntp.sh deleted file mode 100644 index ad9a75045f..0000000000 --- a/deployment/secure_research_environment/cloud_init/resources/check_ntp.sh +++ /dev/null @@ -1,11 +0,0 @@ -#! /bin/bash - -# Check timezone and NTP server -echo "Date: $(date)" -echo "Timezone: $(timedatectl | grep "Time zone" | cut -d ':' -f 2 | sed -e 's/^[[:space:]]*//')" -echo "NTP status: $(timedatectl | grep "NTP service" | cut -d ':' -f 2 | sed -e 's/^[[:space:]]*//')" -echo "NTP server(s): $(grep '^NTP=' -h /etc/systemd/timesyncd.conf /etc/systemd/timesyncd.conf.d/*conf 2> /dev/null | grep -v '^#' | cut -d '=' -f2)" - -# Check the timesync service -# Note that 'timedatectl show-timesync --all' should be a more informative option but does not work as expected -systemctl status systemd-timesyncd diff --git a/deployment/secure_research_environment/cloud_init/resources/configure_pycharm.py b/deployment/secure_research_environment/cloud_init/resources/configure_pycharm.py deleted file mode 100644 index fbc1e188ea..0000000000 --- a/deployment/secure_research_environment/cloud_init/resources/configure_pycharm.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Generate a PyCharm XML configuration file""" -import io -import os -import sys -import xml.etree.ElementTree as ElementTree - - -def generate_xml_output(target): - """Generate a PyCharm XML configuration file""" - # Construct by find-and-replace on a template XML file - jdk_output = ["", ''] - with open("/opt/configuration/jdk-template.xml", "r") as f_jdk: - jdk_template_lines = [line.strip() for line in f_jdk.readlines()] - for python_version in sorted(os.listdir("/opt/pyenv/versions")): - python_short_version = ".".join(python_version.split(".")[0:2]) - python_environment = f"py{python_short_version.replace('.', '')}" - jdk_output += [ - line.replace(r"<>", python_version) - .replace(r"<>", python_short_version) - .replace(r"<>", python_environment) - for line in jdk_template_lines - if line - ] - jdk_output += ["", ""] - - # Write out via ElementTree to validate our XML - print(f"Writing XML output to {target}") - tree = ElementTree.parse(io.StringIO("\n".join(jdk_output))) - tree.write(target) - - -if __name__ == "__main__": - generate_xml_output(sys.argv[1]) diff --git a/deployment/secure_research_environment/cloud_init/resources/guacamole_db_permissions.sql b/deployment/secure_research_environment/cloud_init/resources/guacamole_db_permissions.sql deleted file mode 
100644 index 3c08a08213..0000000000 --- a/deployment/secure_research_environment/cloud_init/resources/guacamole_db_permissions.sql +++ /dev/null @@ -1,12 +0,0 @@ -SELECT - guacamole_connection.connection_name, - guacamole_entity.name -FROM - guacamole_connection_permission - JOIN guacamole_connection ON guacamole_connection_permission.connection_id = guacamole_connection.connection_id - JOIN guacamole_user_group ON guacamole_user_group.entity_id = guacamole_connection_permission.entity_id - JOIN guacamole_user_group_member ON guacamole_user_group_member.user_group_id = guacamole_user_group.user_group_id - JOIN guacamole_entity ON guacamole_entity.entity_id = guacamole_user_group_member.member_entity_id -WHERE - connection_name LIKE 'Desktop:%' - AND permission = 'READ'; \ No newline at end of file diff --git a/deployment/secure_research_environment/cloud_init/resources/guacamole_db_update.mustache.sql b/deployment/secure_research_environment/cloud_init/resources/guacamole_db_update.mustache.sql deleted file mode 100644 index 98dd1cb38c..0000000000 --- a/deployment/secure_research_environment/cloud_init/resources/guacamole_db_update.mustache.sql +++ /dev/null @@ -1,138 +0,0 @@ -/* Require that connection names are unique */ -ALTER TABLE guacamole_connection DROP CONSTRAINT IF EXISTS connection_name_constraint; -ALTER TABLE guacamole_connection ADD CONSTRAINT connection_name_constraint UNIQUE (connection_name); - -/* Load connections from text file */ -CREATE TABLE connections (connection_name VARCHAR(128), ip_address VARCHAR(32)); -COPY connections FROM '/var/lib/postgresql/data/connections.csv' (FORMAT CSV, DELIMITER(';')); - -/* Add initial connections via RDP and ssh*/ -INSERT INTO guacamole_connection (connection_name, protocol) -SELECT CONCAT(connection_type, ' ', connection_name), protocol -FROM - ( - VALUES - ('Desktop:', 'rdp'), - ('SSH:', 'ssh') - ) connection_settings (connection_type, protocol) - CROSS JOIN connections -ON CONFLICT DO NOTHING; - -/* Add connection details */ -INSERT INTO guacamole_connection_parameter (connection_id, parameter_name, parameter_value) - SELECT connection_id, parameter_name, COALESCE(parameter_value, ip_address) - FROM - ( - VALUES - ('hostname', null), - ('disable-copy', '{{guacamole.disableCopy}}'), - ('disable-paste', '{{guacamole.disablePaste}}'), - ('clipboard-encoding', 'UTF-8'), - ('timezone', '{{sre.time.timezone.linux}}'), - ('server-layout', 'en-gb-qwerty') - ) connection_settings (parameter_name, parameter_value) - CROSS JOIN guacamole_connection - JOIN connections ON guacamole_connection.connection_name LIKE CONCAT('%', connections.connection_name) -ON CONFLICT DO NOTHING; - -/* Remove obsolete connections (NB. 
this will cascade delete guacamole_connection_parameter entries) */ -DELETE FROM guacamole_connection -WHERE NOT EXISTS ( - SELECT FROM connections - WHERE guacamole_connection.connection_name LIKE CONCAT('%', connections.connection_name) -); - -/* Drop the temporary connections table */ -DROP TABLE connections; - -/* Ensure that all LDAP users are Guacamole entities */ -INSERT INTO guacamole_entity (name, type) -SELECT usename, 'USER' -FROM - pg_user - JOIN pg_auth_members ON (pg_user.usesysid = pg_auth_members.member) - JOIN pg_roles ON (pg_roles.oid = pg_auth_members.roleid) - WHERE rolname = 'ldap_users' -ON CONFLICT DO NOTHING; - -/* Ensure that all LDAP users are Guacamole users */ -INSERT INTO guacamole_user (entity_id, password_hash, password_salt, password_date) -SELECT entity_id, password_hash, password_salt, CURRENT_TIMESTAMP as password_date -FROM - ( - SELECT - usename, - decode(md5(random() :: text), 'hex'), - decode(md5(random() :: text), 'hex') - FROM - pg_user - JOIN pg_auth_members ON (pg_user.usesysid = pg_auth_members.member) - JOIN pg_roles ON (pg_roles.oid = pg_auth_members.roleid) - WHERE rolname = 'ldap_users' - ) user_details (username, password_hash, password_salt) - JOIN guacamole_entity ON user_details.username = guacamole_entity.name -ON CONFLICT DO NOTHING; - -/* Ensure that all user groups are Guacamole entities */ -INSERT INTO guacamole_entity (name, type) -SELECT groname, 'USER_GROUP' -FROM - pg_group - WHERE (groname LIKE 'SG %') -ON CONFLICT DO NOTHING; - -/* Ensure that all user groups are Guacamole user groups */ -INSERT INTO guacamole_user_group (entity_id) -SELECT entity_id -FROM - guacamole_entity WHERE type = 'USER_GROUP' -ON CONFLICT DO NOTHING; - -/* Ensure that all users are added to the correct group */ -DELETE FROM guacamole_user_group_member; -INSERT INTO guacamole_user_group_member (user_group_id, member_entity_id) -SELECT guacamole_user_group.user_group_id, guac_user.entity_id -FROM - pg_group - JOIN pg_user ON pg_has_role(pg_user.usesysid, grosysid, 'member') - JOIN guacamole_entity guac_group ON pg_group.groname = guac_group.name - JOIN guacamole_entity guac_user ON pg_user.usename = guac_user.name - JOIN guacamole_user_group ON guacamole_user_group.entity_id = guac_group.entity_id - WHERE (groname LIKE 'SG %') -ON CONFLICT DO NOTHING; - -/* Grant administration permissions to members of the SRE System Administrators group */ -INSERT INTO guacamole_system_permission (entity_id, permission) -SELECT entity_id, permission :: guacamole_system_permission_type -FROM - ( - VALUES - ('{{sre.domain.securityGroups.systemAdministrators.name}}', 'CREATE_CONNECTION'), - ('{{sre.domain.securityGroups.systemAdministrators.name}}', 'CREATE_CONNECTION_GROUP'), - ('{{sre.domain.securityGroups.systemAdministrators.name}}', 'CREATE_SHARING_PROFILE'), - ('{{sre.domain.securityGroups.systemAdministrators.name}}', 'CREATE_USER'), - ('{{sre.domain.securityGroups.systemAdministrators.name}}', 'CREATE_USER_GROUP'), - ('{{sre.domain.securityGroups.systemAdministrators.name}}', 'ADMINISTER') - ) group_permissions (username, permission) - JOIN guacamole_entity ON group_permissions.username = guacamole_entity.name AND guacamole_entity.type = 'USER_GROUP' -ON CONFLICT DO NOTHING; - -/* Assign connection permissions to each group */ -DELETE FROM guacamole_connection_permission; -INSERT INTO guacamole_connection_permission (entity_id, connection_id, permission) - SELECT entity_id, connection_id, permission::guacamole_object_permission_type - FROM - ( - VALUES - 
('{{sre.domain.securityGroups.systemAdministrators.name}}', 'READ'), - ('{{sre.domain.securityGroups.systemAdministrators.name}}', 'UPDATE'), - ('{{sre.domain.securityGroups.systemAdministrators.name}}', 'DELETE'), - ('{{sre.domain.securityGroups.systemAdministrators.name}}', 'ADMINISTER'), - ('{{sre.domain.securityGroups.researchUsers.name}}', 'READ') - ) group_permissions (username, permission) - CROSS JOIN guacamole_connection - JOIN guacamole_entity ON group_permissions.username = guacamole_entity.name -ON CONFLICT DO NOTHING; - -/* Remove the default guacadmin user */ -DELETE FROM guacamole_entity WHERE guacamole_entity.name = 'guacadmin'; diff --git a/deployment/secure_research_environment/cloud_init/resources/guacamole_docker_compose.mustache.yaml b/deployment/secure_research_environment/cloud_init/resources/guacamole_docker_compose.mustache.yaml deleted file mode 100644 index af716fd6f0..0000000000 --- a/deployment/secure_research_environment/cloud_init/resources/guacamole_docker_compose.mustache.yaml +++ /dev/null @@ -1,101 +0,0 @@ -# Note that the environment variables are not all documented, but can be found in https://github.com/apache/guacamole-client/blob/1.3.0/guacamole-docker/bin/start.sh -# -# We are not currently using the LDAP settings, but they are left here in case we move MFA into a separate app and switch back to LDAP authentication -# https://enterprise.glyptodon.com/doc/latest/storing-connection-data-within-ldap-950383.html has a good write-up of how to control access using group membership -# -# Originally based on boschkundendienst/guacamole-docker-compose -version: '3.7' -networks: - network_private: - network_public: -services: - postgres: - container_name: postgres_guacamole_compose - image: postgres:16.2 - networks: - - network_private - ports: - - 127.0.0.1:5432:5432 - volumes: - - /etc/localtime:/etc/localtime:ro - - /etc/timezone:/etc/timezone:ro - - /opt/postgresql/data:/var/lib/postgresql/data:rw - - /opt/postgresql/scripts:/scripts:ro - environment: - PGDATA: /var/lib/postgresql/data/guacamole - POSTGRES_DB: guacamole - POSTGRES_USER: guacamole - POSTGRES_PASSWORD: {{{guacamole.internalDbPassword}}} - healthcheck: - test: ["CMD-SHELL", "pg_isready -U postgres"] - interval: 5s - timeout: 5s - retries: 5 - restart: always - - guacd: - container_name: guacd_guacamole_compose - image: guacamole/guacd:1.5.5 - networks: - - network_private - volumes: - - /etc/localtime:/etc/localtime:ro - - /etc/timezone:/etc/timezone:ro - environment: - GUACD_LOG_LEVEL: debug - restart: always - - guacamole: - container_name: guacamole_guacamole_compose - image: guacamole/guacamole:1.5.5 - depends_on: - guacd: - condition: service_started - postgres: - condition: service_healthy - networks: - - network_private - volumes: - - /etc/localtime:/etc/localtime:ro - - /etc/timezone:/etc/timezone:ro - environment: - # Parameters are here: https://github.com/apache/guacamole-client/blob/master/guacamole-docker/bin/start.sh - GUACD_HOSTNAME: guacd - LOGBACK_LEVEL: debug - POSTGRES_HOSTNAME: postgres - POSTGRES_DATABASE: guacamole - POSTGRES_USER: guacamole - POSTGRES_PASSWORD: {{{guacamole.internalDbPassword}}} - OPENID_AUTHORIZATION_ENDPOINT: https://login.microsoftonline.com/{{guacamole.tenantId}}/oauth2/v2.0/authorize - OPENID_JWKS_ENDPOINT: https://login.microsoftonline.com/{{guacamole.tenantId}}/discovery/v2.0/keys - OPENID_ISSUER: https://login.microsoftonline.com/{{guacamole.tenantId}}/v2.0 - OPENID_USERNAME_CLAIM_TYPE: preferred_username - OPENID_CLIENT_ID: 
{{guacamole.applicationId}} - OPENID_REDIRECT_URI: https://{{sre.domain.fqdn}} - restart: always - - nginx: - container_name: nginx_guacamole_compose - image: nginx:1.25 - links: - - guacamole - networks: - - network_private - - network_public - ports: - - 80:80 - - 443:443 - volumes: - - /etc/localtime:/etc/localtime:ro - - /etc/timezone:/etc/timezone:ro - - /opt/nginx/nginx.conf:/etc/nginx/nginx.conf:ro - - /opt/nginx/guacamole.conf:/etc/nginx/conf.d/guacamole.conf:ro - - /opt/ssl/conf:/etc/letsencrypt:ro - - /opt/ssl/dhparam.pem:/etc/ssl/certs/dhparam.pem:ro - command: | - /bin/sh -c "while :; do - sleep 6h & wait $${!}; - nginx -s reload; - done & - nginx -g 'daemon off;'" - restart: always diff --git a/deployment/secure_research_environment/cloud_init/resources/guacamole_nginx_guacamole.mustache.conf b/deployment/secure_research_environment/cloud_init/resources/guacamole_nginx_guacamole.mustache.conf deleted file mode 100644 index c3e812e22f..0000000000 --- a/deployment/secure_research_environment/cloud_init/resources/guacamole_nginx_guacamole.mustache.conf +++ /dev/null @@ -1,49 +0,0 @@ -server { - listen 80; - server_name {{sre.domain.fqdn}}; - location / { - return 301 https://$host$request_uri; - } - location /.well-known/acme-challenge/ { - allow all; - root /var/www/certbot; - } -} -server { - listen [::]:443 ssl http2 default_server; - listen 443 ssl http2 default_server; - server_name {{sre.domain.fqdn}}; - add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always; - - # SSL settings from https://gist.github.com/gavinhungry/7a67174c18085f4a23eb - ssl_certificate /etc/letsencrypt/live/{{sre.domain.fqdn}}/fullchain.pem; - ssl_certificate_key /etc/letsencrypt/live/{{sre.domain.fqdn}}/privkey.pem; - ssl_dhparam /etc/ssl/certs/dhparam.pem; - ssl_protocols TLSv1.2 TLSv1.3; - ssl_prefer_server_ciphers on; - ssl_ecdh_curve secp521r1:secp384r1; - ssl_ciphers {{guacamole.sslCiphers}}; - ssl_session_cache shared:TLS:2m; - ssl_session_timeout 1d; - ssl_buffer_size 4k; - ssl_stapling on; - ssl_stapling_verify on; - add_header Strict-Transport-Security 'max-age=31536000; includeSubDomains; preload' always; - - location / { - proxy_pass http://guacamole:8080/guacamole/; - proxy_buffering off; - proxy_http_version 1.1; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $http_connection; - proxy_cookie_path /guacamole/ /; - access_log off; - client_max_body_size 4096m; - } - - error_page 500 502 503 504 /50x.html; - location /50x.html { - root /usr/share/nginx/html; - } -} diff --git a/deployment/secure_research_environment/cloud_init/resources/guacamole_nginx_nginx.conf b/deployment/secure_research_environment/cloud_init/resources/guacamole_nginx_nginx.conf deleted file mode 100644 index 7fa419602a..0000000000 --- a/deployment/secure_research_environment/cloud_init/resources/guacamole_nginx_nginx.conf +++ /dev/null @@ -1,20 +0,0 @@ -### AAA -user nginx; -worker_processes 1; -error_log /var/log/nginx/error.log warn; -pid /var/run/nginx.pid; -events { - worker_connections 1024; -} -http { - include /etc/nginx/mime.types; - default_type application/octet-stream; - log_format main '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - access_log /var/log/nginx/access.log main; - sendfile on; - keepalive_timeout 65; - include /etc/nginx/conf.d/*.conf; - server_tokens off; -} diff --git 
a/deployment/secure_research_environment/cloud_init/resources/guacamole_pg_ldap_sync.mustache.yaml b/deployment/secure_research_environment/cloud_init/resources/guacamole_pg_ldap_sync.mustache.yaml deleted file mode 100644 index 2fa425472d..0000000000 --- a/deployment/secure_research_environment/cloud_init/resources/guacamole_pg_ldap_sync.mustache.yaml +++ /dev/null @@ -1,62 +0,0 @@ -# LDAP-synchronized groups/users are identified through their -# membership of ldap_user and ldap_group. These two roles must -# therefore be manually defined before running pg_ldap_sync. - -# Connection parameters to LDAP server -# see also: http://net-ldap.rubyforge.org/Net/LDAP.html#method-c-new -# Ideally we would use SSL (as below) but this is not currently working -# port: 636 -# encryption: -# method: :simple_tls -ldap_connection: - host: {{shm.dc.ip}} - port: 389 - auth: - method: :simple - username: {{guacamole.ldapSearchUserDn}} - password: {{{guacamole.ldapSearchUserPassword}}} - -# Search parameters for LDAP users which should be synchronized -ldap_users: - base: {{shm.domain.ous.researchUsers.path}} - # LDAP filter defining which LDAP users should be synchronized - filter: {{{guacamole.ldapUserFilter}}} - # this attribute is used as PG role name. It is 'sAMAccountName@domain' - name_attribute: userPrincipalName - # lowercase name for use as PG role name - lowercase_name: false - -# Search parameters for LDAP groups which should be synchronized -ldap_groups: - base: {{shm.domain.ous.securityGroups.path}} - # LDAP filter defining which LDAP groups should be synchronized - filter: {{{guacamole.ldapGroupFilter}}} - # this attribute is used as PG role name - name_attribute: cn - # lowercase name for use as PG role name - lowercase_name: false - # this attribute must reference to all member DN's of the given group - member_attribute: member - -# Connection parameters to PostgreSQL server -# see also: http://rubydoc.info/gems/pg/PG/Connection#initialize-instance_method -pg_connection: - host: localhost - dbname: guacamole - user: guacamole - password: {{{guacamole.internalDbPassword}}} - -pg_users: - # Filter for identifying LDAP generated users in the database. - # This is the WHERE-condition to "SELECT rolname, oid FROM pg_roles" - filter: oid IN (SELECT pam.member FROM pg_auth_members pam JOIN pg_roles pr ON pr.oid=pam.roleid WHERE pr.rolname='ldap_users') - # Options for CREATE RULE statements - create_options: LOGIN IN ROLE ldap_users - -pg_groups: - # Filter for identifying LDAP generated groups in the database. - # This is the WHERE-condition to "SELECT rolname, oid FROM pg_roles" - filter: oid IN (SELECT pam.member FROM pg_auth_members pam JOIN pg_roles pr ON pr.oid=pam.roleid WHERE pr.rolname='ldap_groups') - # Options for CREATE RULE statements - create_options: NOLOGIN IN ROLE ldap_groups - grant_options: diff --git a/deployment/secure_research_environment/cloud_init/resources/guacamole_synchronise_database.sh b/deployment/secure_research_environment/cloud_init/resources/guacamole_synchronise_database.sh deleted file mode 100644 index 1ab6527a39..0000000000 --- a/deployment/secure_research_environment/cloud_init/resources/guacamole_synchronise_database.sh +++ /dev/null @@ -1,6 +0,0 @@ -#! /bin/bash -echo "Running LDAP synchronisation..." | tee >(logger -t SafeHaven) -/usr/local/bin/pg_ldap_sync -vvv -c /opt/pg-ldap-sync/configuration.yaml 2>&1 | logger -t SafeHaven -echo "Updating database..." 
| tee >(logger -t SafeHaven) -su guacamoledaemon -c "docker compose -f /opt/guacamole/docker-compose.yaml exec -T postgres psql -U guacamole -f /scripts/db_update.sql" | logger -t SafeHaven -echo "Finished database synchronisation" | tee >(logger -t SafeHaven) diff --git a/deployment/secure_research_environment/cloud_init/resources/jdk-template.xml b/deployment/secure_research_environment/cloud_init/resources/jdk-template.xml deleted file mode 100644 index f28032387c..0000000000 --- a/deployment/secure_research_environment/cloud_init/resources/jdk-template.xml +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/deployment/secure_research_environment/cloud_init/resources/join_domain.sh b/deployment/secure_research_environment/cloud_init/resources/join_domain.sh deleted file mode 100644 index 05eb8664cd..0000000000 --- a/deployment/secure_research_environment/cloud_init/resources/join_domain.sh +++ /dev/null @@ -1,58 +0,0 @@ -#! /bin/sh - -# Get command line arguments -if [ $# -ne 4 ]; then - echo "$(basename $0) requires four arguments!"; -fi -DOMAIN_FQDN_LOWER=$1 -DOMAIN_JOIN_OU=$2 -DOMAIN_JOIN_USER=$3 -VM_HOSTNAME=$4 - -# Ensure that /etc/resolv.conf has the correct settings -echo "Ensuring that /etc/resolv.conf has the correct settings..." -sed -i -e "s/^[#]DNS=.*/DNS=/" -e "s/^[#]FallbackDNS=.*/FallbackDNS=/" -e "s/^[#]Domains=.*/Domains=${DOMAIN_FQDN_LOWER}/" /etc/systemd/resolved.conf -ln -rsf /run/systemd/resolve/resolv.conf /etc/resolv.conf -systemctl restart systemd-resolved - -# Check that hostname is correct -echo "Ensuring that hostname is correct..." -if [ "$(hostnamectl --static)" != "${VM_HOSTNAME}.${DOMAIN_FQDN_LOWER}" ] || (! grep -q "$VM_HOSTNAME" /etc/hosts); then - /opt/configuration/configure-hostname.sh > /dev/null -fi - -# Check the NTP service -echo "Ensuring that NTP service is running..." -if [ "$(systemctl is-active systemd-timesyncd)" != "active" ] && [ "$(systemctl is-enabled systemd-timesyncd)" = "enabled" ]; then - systemctl restart systemd-timesyncd - sleep 10 -fi -if [ "$(systemctl is-active chronyd)" != "active" ] && [ "$(systemctl is-enabled chronyd)" = "enabled" ]; then - systemctl restart chronyd - sleep 10 -fi - -# Check the DNS service -echo "Ensuring that DNS service is running..." -if [ "$(systemctl is-active systemd-resolved)" != "active" ]; then - systemctl restart systemd-resolved - sleep 10 -fi - -# Check the SSSD service -echo "Ensuring that SSSD service is running..." -if [ "$(systemctl is-active sssd)" != "active" ]; then - if [ -f /etc/sssd/sssd.conf ]; then rm -f /etc/sssd/sssd.conf; fi - systemctl restart sssd - sleep 10 -fi - -# Join realm - creating the SSSD config if it does not exist -echo "Joining realm '${DOMAIN_FQDN_LOWER}'..." -/usr/sbin/realm leave 2> /dev/null -cat /etc/domain-join.secret | /usr/sbin/realm join --verbose --computer-ou="${DOMAIN_JOIN_OU}" -U "${DOMAIN_JOIN_USER}" "${DOMAIN_FQDN_LOWER}" --install=/ 2>&1 - -# Update SSSD settings -echo "Updating SSSD settings..." 
-sed -i -E 's|(access_provider = ).*|\1simple|' /etc/sssd/sssd.conf -systemctl restart sssd diff --git a/deployment/secure_research_environment/cloud_init/resources/krb5.mustache.conf b/deployment/secure_research_environment/cloud_init/resources/krb5.mustache.conf deleted file mode 100644 index 06dcac7697..0000000000 --- a/deployment/secure_research_environment/cloud_init/resources/krb5.mustache.conf +++ /dev/null @@ -1,18 +0,0 @@ -[libdefaults] - default_realm = {{shm.domain.fqdnUpper}} - forwardable = true - kdc_timesync = true - proxiable = true - renew_lifetime = 7d - ticket_lifetime = 24h -[logging] - default = SYSLOG -[realms] - {{shm.domain.fqdnUpper}} = { - kdc = {{shm.dc.hostnameUpper}}.{{shm.domain.fqdnUpper}}:88 - admin_server = {{shm.dc.hostnameUpper}}.{{shm.domain.fqdnUpper}} - default_domain = {{shm.domain.fqdnUpper}} - } -[domain_realm] - .{{shm.domain.fqdnLower}} = {{shm.domain.fqdnUpper}} - {{shm.domain.fqdnLower}} = {{shm.domain.fqdnUpper}} diff --git a/deployment/secure_research_environment/cloud_init/resources/postgres_create_postgres_triggers.mustache.sql b/deployment/secure_research_environment/cloud_init/resources/postgres_create_postgres_triggers.mustache.sql deleted file mode 100644 index 922db6e148..0000000000 --- a/deployment/secure_research_environment/cloud_init/resources/postgres_create_postgres_triggers.mustache.sql +++ /dev/null @@ -1,63 +0,0 @@ -/* Ensure that all members of '{{sre.domain.securityGroups.systemAdministrators.name}}' have superuser permissions */ -/* Triggering on all ddl_command_end will catch any: CREATE, ALTER, DROP, SECURITY LABEL, COMMENT, GRANT or REVOKE command */ -/* We require that CURRENT_USER has SUPERUSER permissions inside the function, otherwise the ALTER USER calls will fail*/ -CREATE OR REPLACE FUNCTION fn_sysadmin_permissions() -RETURNS event_trigger -LANGUAGE plpgsql -AS $$ -DECLARE -obj record; -BEGIN -IF EXISTS (SELECT usename FROM pg_user WHERE ((usename = CURRENT_USER) AND (usesuper='t'))) THEN - FOR obj in SELECT * FROM pg_user - WHERE usesysid NOT IN ( - SELECT member FROM pg_auth_members - WHERE roleid = ( - SELECT oid FROM pg_roles - WHERE rolname = '{{sre.domain.securityGroups.systemAdministrators.name}}')) - AND NOT usename = 'postgres' - LOOP - EXECUTE format('ALTER USER "%s" WITH NOCREATEDB NOCREATEROLE NOSUPERUSER;', obj.usename); - END LOOP; - FOR obj in SELECT * FROM pg_user WHERE (pg_has_role(usesysid, '{{sre.domain.securityGroups.systemAdministrators.name}}', 'member')) LOOP - EXECUTE format('ALTER USER "%s" WITH CREATEDB CREATEROLE SUPERUSER;', obj.usename); - END LOOP; -END IF; -END; -$$; -CREATE EVENT TRIGGER trg_sysadmin_permissions ON ddl_command_end EXECUTE FUNCTION fn_sysadmin_permissions(); -/* Restrict default privileges on public schema to '{{sre.domain.securityGroups.researchUsers.name}}' */ -REVOKE ALL PRIVILEGES ON SCHEMA public FROM PUBLIC; -GRANT ALL PRIVILEGES ON SCHEMA public TO "{{sre.domain.securityGroups.researchUsers.name}}"; -/* Add a trigger so that new tables under 'public' schema are readable by '{{sre.domain.securityGroups.researchUsers.name}}' */ -CREATE OR REPLACE FUNCTION fn_public_schema_table_permissions() -RETURNS event_trigger -LANGUAGE plpgsql -AS $$ -DECLARE -obj record; -BEGIN -FOR obj IN SELECT * FROM pg_event_trigger_ddl_commands() WHERE (command_tag='CREATE TABLE' AND schema_name='public') LOOP - EXECUTE format('GRANT SELECT ON TABLE %s TO "{{sre.domain.securityGroups.researchUsers.name}}";', obj.object_identity); -END LOOP; -END; -$$; -CREATE EVENT TRIGGER 
trg_public_schema_table_permissions ON ddl_command_end WHEN tag IN ('CREATE TABLE') EXECUTE PROCEDURE fn_public_schema_table_permissions(); -/* Create the data schema: allow '{{sre.domain.securityGroups.researchUsers.name}}' to read and '{{sre.domain.securityGroups.dataAdministrators.name}}' to do anything */ -CREATE SCHEMA IF NOT EXISTS data AUTHORIZATION "{{sre.domain.securityGroups.dataAdministrators.name}}"; -GRANT ALL PRIVILEGES ON SCHEMA data TO "{{sre.domain.securityGroups.dataAdministrators.name}}"; -GRANT USAGE ON SCHEMA data TO "{{sre.domain.securityGroups.researchUsers.name}}"; -/* Add a trigger so that new tables under 'data' schema are owned by '{{sre.domain.securityGroups.dataAdministrators.name}}' and readable by '{{sre.domain.securityGroups.researchUsers.name}}' */ -CREATE OR REPLACE FUNCTION fn_data_schema_table_permissions() -RETURNS event_trigger -LANGUAGE plpgsql -AS $$ -DECLARE -obj record; -BEGIN -FOR obj IN SELECT * FROM pg_event_trigger_ddl_commands() WHERE (command_tag='CREATE TABLE' AND schema_name='data') LOOP - EXECUTE format('ALTER TABLE %s OWNER TO "{{sre.domain.securityGroups.dataAdministrators.name}}"; GRANT SELECT ON TABLE %s TO "{{sre.domain.securityGroups.researchUsers.name}}";', obj.object_identity, obj.object_identity); -END LOOP; -END; -$$; -CREATE EVENT TRIGGER trg_data_schema_table_permissions ON ddl_command_end WHEN tag IN ('CREATE TABLE') EXECUTE PROCEDURE fn_data_schema_table_permissions(); \ No newline at end of file diff --git a/deployment/secure_research_environment/cloud_init/resources/postgres_generate_keytab.mustache.sh b/deployment/secure_research_environment/cloud_init/resources/postgres_generate_keytab.mustache.sh deleted file mode 100644 index 13fa21cbf0..0000000000 --- a/deployment/secure_research_environment/cloud_init/resources/postgres_generate_keytab.mustache.sh +++ /dev/null @@ -1,29 +0,0 @@ -#! /bin/bash -# Generate a keytab and verify by getting a Kerberos ticket -echo ">=== Generating service principal credentials... ===<" -SERVICE_PRINCIPAL="POSTGRES/{{postgres.vmName}}.{{shm.domain.fqdnLower}}" -KEYTAB_PATH="/var/lib/postgresql/data/postgres.keytab" -install -g postgres -o postgres -d /var/lib/postgresql/data/ # create the directory with correct ownership -echo "ldapsearch -b '{{postgres.ldapPostgresServiceAccountDn}}' -h {{shm.dc.hostnameUpper}}.{{shm.domain.fqdnLower}} -D '{{{postgres.ldapSearchUserDn}}}' -W msDS-KeyVersionNumber" -KVNO=$(ldapsearch -b "{{postgres.ldapPostgresServiceAccountDn}}" -h {{shm.dc.hostnameUpper}}.{{shm.domain.fqdnLower}} -D "{{{postgres.ldapSearchUserDn}}}" -w $(cat /etc/ldap.secret) msDS-KeyVersionNumber | grep "msDS-KeyVersionNumber:" | cut -d' ' -f2) -echo "Current KVNO is $KVNO" -# Use the same encryption methods and ordering as ktpass on Windows. The Active Directory default is RC4-HMAC. -# NB. Kerberos will preferentially choose AES-256, but Active Directory does not support it without configuration changes. 
We therefore do not include it in the keytab -DESCRC="add_entry -password -p $SERVICE_PRINCIPAL -k $KVNO -e des-cbc-crc\n$(cat /etc/postgres-service-account.secret)" -DESMD5="add_entry -password -p $SERVICE_PRINCIPAL -k $KVNO -e des-cbc-md5\n$(cat /etc/postgres-service-account.secret)" -A4HMAC="add_entry -password -p $SERVICE_PRINCIPAL -k $KVNO -e arcfour-hmac\n$(cat /etc/postgres-service-account.secret)" -AES128="add_entry -password -p $SERVICE_PRINCIPAL -k $KVNO -e aes128-cts-hmac-sha1-96\n$(cat /etc/postgres-service-account.secret)" -printf "%b" "$DESCRC\n$DESMD5\n$A4HMAC\n$AES128\nwrite_kt $KEYTAB_PATH" | ktutil -echo "" # for appropriate spacing after the ktutil command -# Set correct permissions for the keytab file -chown postgres:postgres $KEYTAB_PATH -chmod 0400 $KEYTAB_PATH -echo ">=== Testing credentials with kinit... ===<" -echo "klist -e -t -k $KEYTAB_PATH" -klist -e -t -k $KEYTAB_PATH -echo "kinit -t $KEYTAB_PATH $SERVICE_PRINCIPAL" -kinit -t $KEYTAB_PATH $SERVICE_PRINCIPAL -klist -# Set the appropriate keytab file -sed -i "s|#krb_server_keyfile|krb_server_keyfile = '$KEYTAB_PATH'\n#krb_server_keyfile|g" /etc/postgresql/12/main/postgresql.conf -grep krb_server_keyfile /etc/postgresql/12/main/postgresql.conf \ No newline at end of file diff --git a/deployment/secure_research_environment/cloud_init/resources/postgres_initial_ldap_sync.sh b/deployment/secure_research_environment/cloud_init/resources/postgres_initial_ldap_sync.sh deleted file mode 100644 index 01d08c74c4..0000000000 --- a/deployment/secure_research_environment/cloud_init/resources/postgres_initial_ldap_sync.sh +++ /dev/null @@ -1,3 +0,0 @@ -#! /bin/bash -echo ">=== Ensuring that LDAP sync has run... ===<" -sudo -i -u postgres pg_ldap_sync -c /etc/postgresql/12/main/pg-ldap-sync.yaml -vv 2>&1 \ No newline at end of file diff --git a/deployment/secure_research_environment/cloud_init/resources/postgres_install_postgres_extensions.sql b/deployment/secure_research_environment/cloud_init/resources/postgres_install_postgres_extensions.sql deleted file mode 100644 index dafa18284b..0000000000 --- a/deployment/secure_research_environment/cloud_init/resources/postgres_install_postgres_extensions.sql +++ /dev/null @@ -1,14 +0,0 @@ -CREATE SCHEMA IF NOT EXISTS postgis; -CREATE EXTENSION IF NOT EXISTS postgis; -CREATE EXTENSION IF NOT EXISTS postgis_topology; -CREATE EXTENSION IF NOT EXISTS postgis_sfcgal; -CREATE EXTENSION IF NOT EXISTS pgrouting; -CREATE EXTENSION IF NOT EXISTS fuzzystrmatch; -CREATE EXTENSION IF NOT EXISTS unaccent; -CREATE EXTENSION IF NOT EXISTS pg_trgm; -CREATE EXTENSION IF NOT EXISTS bloom; -CREATE EXTENSION IF NOT EXISTS citext; -CREATE EXTENSION IF NOT EXISTS cube; -CREATE EXTENSION IF NOT EXISTS file_fdw; -CREATE EXTENSION IF NOT EXISTS postgres_fdw; -CREATE EXTENSION IF NOT EXISTS earthdistance; \ No newline at end of file diff --git a/deployment/secure_research_environment/cloud_init/resources/postgres_list_postgres_users.sh b/deployment/secure_research_environment/cloud_init/resources/postgres_list_postgres_users.sh deleted file mode 100644 index c21059a4c0..0000000000 --- a/deployment/secure_research_environment/cloud_init/resources/postgres_list_postgres_users.sh +++ /dev/null @@ -1,15 +0,0 @@ -#! /bin/bash -# Restart the postgresql service -echo ">=== Restarting postgres... ===<" -systemctl restart postgresql@12-main -sleep 10 -systemctl status postgresql@12-main - -# Show postgres users and roles -echo ">=== List postgres users and roles... 
===<" -echo "USERS:" -sudo -i -u postgres psql -q -c "SELECT * FROM pg_user;" -echo "ROLES:" -sudo -i -u postgres psql -q -c "SELECT rolname, rolsuper, rolinherit, rolinherit, rolcreatedb, rolcanlogin, oid FROM pg_roles;" -echo "SCHEMAS:" -sudo -i -u postgres psql -q -c "SELECT schema_name FROM information_schema.schemata;" \ No newline at end of file diff --git a/deployment/secure_research_environment/cloud_init/resources/postgres_pg_ldap_sync.mustache.yaml b/deployment/secure_research_environment/cloud_init/resources/postgres_pg_ldap_sync.mustache.yaml deleted file mode 100644 index 7c6a74619b..0000000000 --- a/deployment/secure_research_environment/cloud_init/resources/postgres_pg_ldap_sync.mustache.yaml +++ /dev/null @@ -1,62 +0,0 @@ -# LDAP-synchronized groups/users are identified through their -# membership of ldap_user and ldap_group. These two roles must -# therefore be manually defined before running pg_ldap_sync. - -# Connection parameters to LDAP server -# see also: http://net-ldap.rubyforge.org/Net/LDAP.html#method-c-new -# Ideally we would use SSL (as below) but this is not currently working -# port: 636 -# encryption: -# method: :simple_tls -ldap_connection: - host: {{shm.dc.hostname}} - port: 389 - auth: - method: :simple - username: {{{postgres.ldapSearchUserDn}}} - password: {{{postgres.ldapSearchUserPassword}}} - -# Search parameters for LDAP users which should be synchronized -ldap_users: - base: {{shm.domain.ous.researchUsers.path}} - # LDAP filter defining which LDAP users should be synchronized - filter: {{{postgres.ldapUserFilter}}} - # this attribute is used as PG role name - name_attribute: sAMAccountName - # lowercase name for use as PG role name - lowercase_name: false - -# Search parameters for LDAP groups which should be synchronized -ldap_groups: - base: {{shm.domain.ous.securityGroups.path}} - # LDAP filter defining which LDAP groups should be synchronized - filter: {{{postgres.ldapGroupFilter}}} - # this attribute is used as PG role name - name_attribute: cn - # lowercase name for use as PG role name - lowercase_name: false - # this attribute must reference to all member DN's of the given group - member_attribute: member - -# Connection parameters to PostgreSQL server -# see also: http://rubydoc.info/gems/pg/PG/Connection#initialize-instance_method -pg_connection: - host: - dbname: postgres - user: postgres - password: - -pg_users: - # Filter for identifying LDAP generated users in the database. - # It's the WHERE-condition to "SELECT rolname, oid FROM pg_roles" - filter: oid IN (SELECT pam.member FROM pg_auth_members pam JOIN pg_roles pr ON pr.oid=pam.roleid WHERE pr.rolname='ldap_users') - # Options for CREATE RULE statements - create_options: LOGIN IN ROLE ldap_users - -pg_groups: - # Filter for identifying LDAP generated groups in the database. - # It's the WHERE-condition to "SELECT rolname, oid FROM pg_roles" - filter: oid IN (SELECT pam.member FROM pg_auth_members pam JOIN pg_roles pr ON pr.oid=pam.roleid WHERE pr.rolname='ldap_groups') - # Options for CREATE RULE statements - create_options: NOLOGIN IN ROLE ldap_groups - grant_options: diff --git a/deployment/secure_research_environment/cloud_init/resources/postgres_set_public_schema_permissions.sh b/deployment/secure_research_environment/cloud_init/resources/postgres_set_public_schema_permissions.sh deleted file mode 100644 index 5974a9cb97..0000000000 --- a/deployment/secure_research_environment/cloud_init/resources/postgres_set_public_schema_permissions.sh +++ /dev/null @@ -1,8 +0,0 @@ -#! 
/bin/bash -# Set appropriate permissions on public schema -# - Remove privileges from PUBLIC (everyone) -# - Create the 'data' schema belonging to data-admins and grant read access to research users -# - Grant all privileges on the 'public' schema to research users -# - Grant superuser privileges to sysadmins -echo ">=== Setting appropriate permissions on public schema... ===<" -sudo -i -u postgres psql -f /opt/configuration/create-postgres-triggers.sql \ No newline at end of file diff --git a/deployment/secure_research_environment/cloud_init/resources/project.default.xml b/deployment/secure_research_environment/cloud_init/resources/project.default.xml deleted file mode 100644 index 03d59ed3a5..0000000000 --- a/deployment/secure_research_environment/cloud_init/resources/project.default.xml +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - diff --git a/deployment/secure_research_environment/cloud_init/resources/realmd.mustache.conf b/deployment/secure_research_environment/cloud_init/resources/realmd.mustache.conf deleted file mode 100644 index fcbcef677b..0000000000 --- a/deployment/secure_research_environment/cloud_init/resources/realmd.mustache.conf +++ /dev/null @@ -1,9 +0,0 @@ -[users] -default-home = /home/%u - -[active-directory] -os-name = Ubuntu Linux -os-version = unknown - -[{{shm.domain.fqdnLower}}] -fully-qualified-names = no diff --git a/deployment/secure_research_environment/cloud_init/resources/runonce.sh b/deployment/secure_research_environment/cloud_init/resources/runonce.sh deleted file mode 100644 index 892cea90f1..0000000000 --- a/deployment/secure_research_environment/cloud_init/resources/runonce.sh +++ /dev/null @@ -1,11 +0,0 @@ -#! /bin/sh -for filepath in /etc/local/runonce.d/*; do - if [ -f "$filepath" ]; then - filename=$(basename $filepath) - "$filepath" | tee $filename.log - mkdir -p /etc/local/runonce.d/ran - timestamp=$(date -u --iso-8601=seconds | sed 's/[:+-]//g') - mv "$filepath" "/etc/local/runonce.d/ran/${filename}.${timestamp}" - mv "${filename}.log" "/etc/local/runonce.d/ran/${filename}.${timestamp}.log" - fi -done \ No newline at end of file diff --git a/deployment/secure_research_environment/cloud_init/resources/set_dns.mustache.sh b/deployment/secure_research_environment/cloud_init/resources/set_dns.mustache.sh deleted file mode 100644 index dff778cabd..0000000000 --- a/deployment/secure_research_environment/cloud_init/resources/set_dns.mustache.sh +++ /dev/null @@ -1,16 +0,0 @@ -#! 
/bin/bash - -# Ensure that the correct DNS servers are being used -sed -i -e "s/^[#]*DNS=.*/DNS={{shm.dc.ip}}/" -e "s/^[#]*FallbackDNS=.*/FallbackDNS={{shm.dcb.ip}}/" -e "s/^[#]Domains=.*/Domains={{shm.domain.fqdnLower}}/" /etc/systemd/resolved.conf - -# Add systemd-resolved to list of services to start on boot -systemctl enable systemd-resolved - -# Restart systemd networking -systemctl daemon-reload -systemctl restart systemd-networkd -systemctl restart systemd-resolved - -# Check DNS settings -ln -rsf /run/systemd/resolve/resolv.conf /etc/resolv.conf -grep -v "^#" /etc/resolv.conf diff --git a/deployment/secure_research_environment/cloud_init/resources/xrdp_custom_logo.bmp b/deployment/secure_research_environment/cloud_init/resources/xrdp_custom_logo.bmp deleted file mode 100644 index 43ffe2bdda..0000000000 Binary files a/deployment/secure_research_environment/cloud_init/resources/xrdp_custom_logo.bmp and /dev/null differ diff --git a/deployment/secure_research_environment/default.latex b/deployment/secure_research_environment/default.latex deleted file mode 100644 index a894cb2b08..0000000000 --- a/deployment/secure_research_environment/default.latex +++ /dev/null @@ -1,284 +0,0 @@ -\documentclass[$if(fontsize)$$fontsize$,$endif$$if(lang)$$babel-lang$,$endif$$if(papersize)$$papersize$paper,$endif$$for(classoption)$$classoption$$sep$,$endfor$]{$documentclass$} - -\usepackage{float} -\let\origfigure=\figure -\let\endorigfigure=\endfigure -\renewenvironment{figure}[1][]{% - \origfigure[H] -}{% - \endorigfigure -} - -$if(beamerarticle)$ -\usepackage{beamerarticle} % needs to be loaded first -$endif$ -$if(fontfamily)$ -\usepackage[$for(fontfamilyoptions)$$fontfamilyoptions$$sep$,$endfor$]{$fontfamily$} -$else$ -\usepackage{lmodern} -$endif$ -$if(linestretch)$ -\usepackage{setspace} -\setstretch{$linestretch$} -$endif$ -\usepackage{amssymb,amsmath} -\usepackage{ifxetex,ifluatex} -\usepackage{fixltx2e} % provides \textsubscript -\ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex - \usepackage[$if(fontenc)$$fontenc$$else$T1$endif$]{fontenc} - \usepackage[utf8]{inputenc} -$if(euro)$ - \usepackage{eurosym} -$endif$ -\else % if luatex or xelatex - \ifxetex - \usepackage{mathspec} - \else - \usepackage{fontspec} - \fi - \defaultfontfeatures{Ligatures=TeX,Scale=MatchLowercase} -$for(fontfamilies)$ - \newfontfamily{$fontfamilies.name$}[$fontfamilies.options$]{$fontfamilies.font$} -$endfor$ -$if(euro)$ - \newcommand{\euro}{€} -$endif$ -$if(mainfont)$ - \setmainfont[$for(mainfontoptions)$$mainfontoptions$$sep$,$endfor$]{$mainfont$} -$endif$ -$if(sansfont)$ - \setsansfont[$for(sansfontoptions)$$sansfontoptions$$sep$,$endfor$]{$sansfont$} -$endif$ -$if(monofont)$ - \setmonofont[Mapping=tex-ansi$if(monofontoptions)$,$for(monofontoptions)$$monofontoptions$$sep$,$endfor$$endif$]{$monofont$} -$endif$ -$if(mathfont)$ - \setmathfont(Digits,Latin,Greek)[$for(mathfontoptions)$$mathfontoptions$$sep$,$endfor$]{$mathfont$} -$endif$ -$if(CJKmainfont)$ - \usepackage{xeCJK} - \setCJKmainfont[$for(CJKoptions)$$CJKoptions$$sep$,$endfor$]{$CJKmainfont$} -$endif$ -\fi -% use upquote if available, for straight quotes in verbatim environments -\IfFileExists{upquote.sty}{\usepackage{upquote}}{} -% use microtype if available -\IfFileExists{microtype.sty}{% -\usepackage{microtype} -\UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts -}{} -$if(geometry)$ -\usepackage[$for(geometry)$$geometry$$sep$,$endfor$]{geometry} -$endif$ -\usepackage[unicode=true]{hyperref} -$if(colorlinks)$ 
-\PassOptionsToPackage{usenames,dvipsnames}{color} % color is loaded by hyperref -$endif$ -\hypersetup{ -$if(title-meta)$ - pdftitle={$title-meta$}, -$endif$ -$if(author-meta)$ - pdfauthor={$author-meta$}, -$endif$ -$if(keywords)$ - pdfkeywords={$for(keywords)$$keywords$$sep$, $endfor$}, -$endif$ -$if(colorlinks)$ - colorlinks=true, - linkcolor=$if(linkcolor)$$linkcolor$$else$Maroon$endif$, - citecolor=$if(citecolor)$$citecolor$$else$Blue$endif$, - urlcolor=$if(urlcolor)$$urlcolor$$else$Blue$endif$, -$else$ - pdfborder={0 0 0}, -$endif$ - breaklinks=true} -\urlstyle{same} % don't use monospace font for urls -$if(lang)$ -\ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex - \usepackage[shorthands=off,$for(babel-otherlangs)$$babel-otherlangs$,$endfor$main=$babel-lang$]{babel} -$if(babel-newcommands)$ - $babel-newcommands$ -$endif$ -\else - \usepackage{polyglossia} - \setmainlanguage[$polyglossia-lang.options$]{$polyglossia-lang.name$} -$for(polyglossia-otherlangs)$ - \setotherlanguage[$polyglossia-otherlangs.options$]{$polyglossia-otherlangs.name$} -$endfor$ -\fi -$endif$ -$if(natbib)$ -\usepackage{natbib} -\bibliographystyle{$if(biblio-style)$$biblio-style$$else$plainnat$endif$} -$endif$ -$if(biblatex)$ -\usepackage[$if(biblio-style)$style=$biblio-style$,$endif$$for(biblatexoptions)$$biblatexoptions$$sep$,$endfor$]{biblatex} -$for(bibliography)$ -\addbibresource{$bibliography$} -$endfor$ -$endif$ -$if(listings)$ -\usepackage{listings} -$endif$ -$if(lhs)$ -\lstnewenvironment{code}{\lstset{language=Haskell,basicstyle=\small\ttfamily}}{} -$endif$ -$if(highlighting-macros)$ -$highlighting-macros$ -$endif$ -$if(verbatim-in-note)$ -\usepackage{fancyvrb} -\VerbatimFootnotes % allows verbatim text in footnotes -$endif$ -$if(tables)$ -\usepackage{longtable,booktabs} -% Fix footnotes in tables (requires footnote package) -\IfFileExists{footnote.sty}{\usepackage{footnote}\makesavenoteenv{long table}}{} -$endif$ -$if(graphics)$ -\usepackage{graphicx,grffile} -\makeatletter -\def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi} -\def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi} -\makeatother -% Scale images if necessary, so that they will not overflow the page -% margins by default, and it is still possible to overwrite the defaults -% using explicit options in \includegraphics[width, height, ...]{} -\setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio} -$endif$ -$if(links-as-notes)$ -% Make links footnotes instead of hotlinks: -\renewcommand{\href}[2]{#2\footnote{\url{#1}}} -$endif$ -$if(strikeout)$ -\usepackage[normalem]{ulem} -% avoid problems with \sout in headers with hyperref: -\pdfstringdefDisableCommands{\renewcommand{\sout}{}} -$endif$ -$if(indent)$ -$else$ -\IfFileExists{parskip.sty}{% -\usepackage{parskip} -}{% else -\setlength{\parindent}{0pt} -\setlength{\parskip}{6pt plus 2pt minus 1pt} -} -$endif$ -\setlength{\emergencystretch}{3em} % prevent overfull lines -\providecommand{\tightlist}{% - \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}} -$if(numbersections)$ -\setcounter{secnumdepth}{$if(secnumdepth)$$secnumdepth$$else$5$endif$} -$else$ -\setcounter{secnumdepth}{0} -$endif$ -$if(subparagraph)$ -$else$ -% Redefines (sub)paragraphs to behave more like sections -\ifx\paragraph\undefined\else -\let\oldparagraph\paragraph -\renewcommand{\paragraph}[1]{\oldparagraph{#1}\mbox{}} -\fi -\ifx\subparagraph\undefined\else -\let\oldsubparagraph\subparagraph -\renewcommand{\subparagraph}[1]{\oldsubparagraph{#1}\mbox{}} -\fi 
-$endif$ -$if(dir)$ -\ifxetex - % load bidi as late as possible as it modifies e.g. graphicx - $if(latex-dir-rtl)$ - \usepackage[RTLdocument]{bidi} - $else$ - \usepackage{bidi} - $endif$ -\fi -\ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex - \TeXXeTstate=1 - \newcommand{\RL}[1]{\beginR #1\endR} - \newcommand{\LR}[1]{\beginL #1\endL} - \newenvironment{RTL}{\beginR}{\endR} - \newenvironment{LTR}{\beginL}{\endL} -\fi -$endif$ - -% set default figure placement to htbp -\makeatletter -\def\fps@figure{htbp} -\makeatother - -$for(header-includes)$ -$header-includes$ -$endfor$ - -$if(title)$ -\title{$title$$if(thanks)$\thanks{$thanks$}$endif$} -$endif$ -$if(subtitle)$ -\providecommand{\subtitle}[1]{} -\subtitle{$subtitle$} -$endif$ -$if(author)$ -\author{$for(author)$$author$$sep$ \and $endfor$} -$endif$ -$if(institute)$ -\providecommand{\institute}[1]{} -\institute{$for(institute)$$institute$$sep$ \and $endfor$} -$endif$ -\date{$date$} - -\begin{document} -$if(title)$ -\maketitle -$endif$ -$if(abstract)$ -\begin{abstract} -$abstract$ -\end{abstract} -$endif$ - -$for(include-before)$ -$include-before$ - -$endfor$ -$if(toc)$ -{ -$if(colorlinks)$ -\hypersetup{linkcolor=$if(toccolor)$$toccolor$$else$black$endif$} -$endif$ -\setcounter{tocdepth}{$toc-depth$} -\tableofcontents -} -$endif$ -$if(lot)$ -\listoftables -$endif$ -$if(lof)$ -\listoffigures -$endif$ -$body$ - -$if(natbib)$ -$if(bibliography)$ -$if(biblio-title)$ -$if(book-class)$ -\renewcommand\bibname{$biblio-title$} -$else$ -\renewcommand\refname{$biblio-title$} -$endif$ -$endif$ -\bibliography{$for(bibliography)$$bibliography$$sep$,$endfor$} - -$endif$ -$endif$ -$if(biblatex)$ -\printbibliography$if(biblio-title)$[title=$biblio-title$]$endif$ - -$endif$ -$for(include-after)$ -$include-after$ - -$endfor$ - -\end{document} diff --git a/deployment/secure_research_environment/network_rules/sre-firewall-rules.json b/deployment/secure_research_environment/network_rules/sre-firewall-rules.json deleted file mode 100644 index 463c2069f5..0000000000 --- a/deployment/secure_research_environment/network_rules/sre-firewall-rules.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "routes": [ - { - "name": "ViaFirewall", - "properties": { - "addressPrefix": "0.0.0.0/0", - "nextHop": "{{shm.firewall.privateIpAddress}}" - } - }, - { - "name": "ViaVpn", - "properties": { - "addressPrefix": "{{shm.network.vpn.cidr}}", - "nextHop": "VirtualNetworkGateway" - } - } - ], - "networkRuleCollections": [], - "applicationRuleCollections": [], - "natRuleCollections": [] -} diff --git a/deployment/secure_research_environment/network_rules/sre-nsg-rules-compute.json b/deployment/secure_research_environment/network_rules/sre-nsg-rules-compute.json deleted file mode 100644 index 03b5f2ee02..0000000000 --- a/deployment/secure_research_environment/network_rules/sre-nsg-rules-compute.json +++ /dev/null @@ -1,232 +0,0 @@ -[ - { - "name": "AllowRemoteDesktopGatewayInbound", - "access": "Allow", - "description": "Allow connections to SRDs from remote desktop gateway", - "destinationAddressPrefix": "{{sre.network.vnet.subnets.compute.cidr}}", - "destinationPortRange": ["22", "3389"], - "direction": "Inbound", - "priority": 800, - "protocol": "*", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.remoteDesktop.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "DenyAdminVPNInbound", - "access": "Deny", - "description": "Deny connections from admin P2S VPN", - "destinationAddressPrefix": "{{sre.network.vnet.subnets.compute.cidr}}", - "destinationPortRange": "*", - "direction": 
"Inbound", - "priority": 2000, - "protocol": "*", - "sourceAddressPrefix": "{{shm.network.vpn.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherInbound", - "access": "Deny", - "description": "Deny all other inbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - }, - { - "name": "AllowPrivateDataEndpointsOutbound", - "access": "Allow", - "description": "Allow outbound connections to private endpoints in the VNet", - "destinationAddressPrefix": "{{sre.network.vnet.subnets.data.cidr}}", - "destinationPortRange": "*", - "direction": "Outbound", - "priority": 400, - "protocol": "*", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.compute.cidr}}", - "sourcePortRange": "*" - }, - {{#sre.databases.enabled}} - { - "name": "AllowDatabasesSubnetOutbound", - "access": "Allow", - "description": "Allow outbound connections to the databases subnet", - "destinationAddressPrefix": "{{sre.network.vnet.subnets.databases.cidr}}", - "destinationPortRange": [ - {{#sre.databases.instances}} - "{{port}}", - {{/sre.databases.instances}} - ], - "direction": "Outbound", - "priority": 500, - "protocol": "*", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.compute.cidr}}", - "sourcePortRange": "*" - }, - {{/sre.databases.enabled}} - { - "name": "AllowWebappsSubnetOutbound", - "access": "Allow", - "description": "Allow outbound http(s) connections to the webapps subnet", - "destinationAddressPrefix": "{{sre.network.vnet.subnets.webapps.cidr}}", - "destinationPortRange": ["80", "443"], - "direction": "Outbound", - "priority": 600, - "protocol": "TCP", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.compute.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowLinuxUpdatesOutbound", - "access": "Allow", - "description": "Allow outbound connections to Linux update servers", - "destinationAddressPrefix": "{{shm.network.vnet.subnets.updateServers.cidr}}", - "destinationPortRange": ["8000"], - "direction": "Outbound", - "priority": 900, - "protocol": "TCP", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.compute.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowDomainJoinedClientsUdpOutbound", - "access": "Allow", - "description": "Allow domain-joined client requests over UDP: Kerberos; LDAP.", - "destinationAddressPrefix": "{{shm.network.vnet.subnets.identity.cidr}}", - "destinationPortRange": ["88", "389"], - "direction": "Outbound", - "priority": 1000, - "protocol": "UDP", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.compute.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowDomainJoinedClientsTcpOutbound", - "access": "Allow", - "description": "Allow domain-joined client requests over TCP: (see https://devopstales.github.io/linux/pfsense-ad-join/ for details).", - "destinationAddressPrefix": "{{shm.network.vnet.subnets.identity.cidr}}", - "destinationPortRange": ["88", "135", "139", "389", "445", "464", "636", "3268", "3269", "49152-65535"], - "direction": "Outbound", - "priority": 1100, - "protocol": "TCP", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.compute.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowDNSOutbound", - "access": "Allow", - "description": "Allow DNS requests to SHM", - "destinationAddressPrefix": "{{shm.network.vnet.subnets.identity.cidr}}", - "destinationPortRange": "53", - "direction": "Outbound", - "priority": 1200, - "protocol": "*", - "sourceAddressPrefix": 
"{{sre.network.vnet.subnets.compute.cidr}}", - "sourcePortRange": "*" - }, - {{#sre.repositories.network.cidr}} - { - "name": "AllowPackageRepositoriesOutbound", - "access": "Allow", - "description": "Allow package requests over http/https", - "destinationAddressPrefix": "{{sre.repositories.network.cidr}}", - "destinationPortRange": ["80", "443", "3128"], - "direction": "Outbound", - "priority": 1400, - "protocol": "*", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.compute.cidr}}", - "sourcePortRange": "*" - }, - {{/sre.repositories.network.cidr}} - { - "name": "AllowMonitoringToolsOutbound", - "access": "Allow", - "description": "Allow connections to local monitoring tools", - "destinationAddressPrefix": "{{shm.network.vnet.subnets.monitoring.cidr}}", - "destinationPortRange": "443", - "direction": "Outbound", - "priority": 1500, - "protocol": "TCP", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.compute.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalNTPOutbound", - "access": "Allow", - "description": "Allow outbound connections to external NTP servers", - "destinationAddressPrefix": [ - {{#shm.time.ntp.serverAddresses}} - "{{.}}", - {{/shm.time.ntp.serverAddresses}} - ], - "destinationPortRange": "123", - "direction": "Outbound", - "priority": 3000, - "protocol": "UDP", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.compute.cidr}}", - "sourcePortRange": "*" - }, - {{#sre.remoteDesktop.networkRules.includeAzurePlatformDnsRule}} - { - "name": "{{sre.remoteDesktop.networkRules.outboundInternet}}ExternalAzurePlatformDnsOutbound", - "access": "{{sre.remoteDesktop.networkRules.outboundInternet}}", - "description": "{{sre.remoteDesktop.networkRules.outboundInternet}} outbound connections to Azure Platform DNS endpoints (including 168.63.129.16), which are not included in the 'Internet' service tag.", - "destinationAddressPrefix": "AzurePlatformDNS", - "destinationPortRange": "*", - "direction": "Outbound", - "priority": 3100, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - }, - {{/sre.remoteDesktop.networkRules.includeAzurePlatformDnsRule}} - { - "name": "AllowExternalClamAVOutbound", - "access": "Allow", - "description": "Allow outbound connections to external ClamAV servers", - "destinationAddressPrefix": ["104.16.218.84", "104.16.219.84"], - "destinationPortRange": ["53", "443"], - "direction": "Outbound", - "priority": 3300, - "protocol": "TCP", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.compute.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowAzureAutomationOutbound", - "access": "Allow", - "description": "Allow outbound connections to Azure automation servers", - "destinationAddressPrefix": "GuestAndHybridManagement", - "destinationPortRange": ["443"], - "direction": "Outbound", - "priority": 3800, - "protocol": "TCP", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.compute.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "{{sre.remoteDesktop.networkRules.outboundInternet}}ExternalInternetOutbound", - "access": "{{sre.remoteDesktop.networkRules.outboundInternet}}", - "description": "{{sre.remoteDesktop.networkRules.outboundInternet}} outbound connections to internet", - "destinationAddressPrefix": "Internet", - "destinationPortRange": "*", - "direction": "Outbound", - "priority": 3900, - "protocol": "*", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.compute.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherOutbound", - "access": "Deny", - "description": "Deny all other 
outbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Outbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - } -] diff --git a/deployment/secure_research_environment/network_rules/sre-nsg-rules-databases.json b/deployment/secure_research_environment/network_rules/sre-nsg-rules-databases.json deleted file mode 100644 index 378989ef12..0000000000 --- a/deployment/secure_research_environment/network_rules/sre-nsg-rules-databases.json +++ /dev/null @@ -1,172 +0,0 @@ -[ - {{#sre.databases.enabled}} - { - "name": "AllowComputeSubnetInbound", - "access": "Allow", - "description": "Allow inbound connections from the compute subnet", - "destinationAddressPrefix": "{{sre.network.vnet.subnets.databases.cidr}}", - "destinationPortRange": [ - {{#sre.databases.instances}} - "{{port}}", - {{/sre.databases.instances}} - ], - "direction": "Inbound", - "priority": 500, - "protocol": "*", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.compute.cidr}}", - "sourcePortRange": "*" - }, - {{/sre.databases.enabled}} - { - "name": "DenyAdminVPNInbound", - "access": "Deny", - "description": "Deny connections from admin P2S VPN", - "destinationAddressPrefix": "{{sre.network.vnet.subnets.databases.cidr}}", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 2000, - "protocol": "*", - "sourceAddressPrefix": "{{shm.network.vpn.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherInbound", - "access": "Deny", - "description": "Deny all other inbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - }, - { - "name": "AllowLinuxUpdatesOutbound", - "access": "Allow", - "description": "Allow outbound connections to Linux update servers", - "destinationAddressPrefix": "{{shm.network.vnet.subnets.updateServers.cidr}}", - "destinationPortRange": ["8000"], - "direction": "Outbound", - "priority": 900, - "protocol": "TCP", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.databases.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowDomainJoinedClientsUdpOutbound", - "access": "Allow", - "description": "Allow domain-joined client requests over UDP: Kerberos; LDAP.", - "destinationAddressPrefix": "{{shm.network.vnet.subnets.identity.cidr}}", - "destinationPortRange": ["88", "389"], - "direction": "Outbound", - "priority": 1000, - "protocol": "UDP", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.databases.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowDomainJoinedClientsTcpOutbound", - "access": "Allow", - "description": "Allow domain-joined client requests over TCP: (see https://devopstales.github.io/linux/pfsense-ad-join/ for details).", - "destinationAddressPrefix": "{{shm.network.vnet.subnets.identity.cidr}}", - "destinationPortRange": ["88", "135", "139", "389", "445", "464", "636", "3268", "3269", "49152-65535"], - "direction": "Outbound", - "priority": 1100, - "protocol": "TCP", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.databases.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowDNSOutbound", - "access": "Allow", - "description": "Allow DNS requests to SHM", - "destinationAddressPrefix": "{{shm.network.vnet.subnets.identity.cidr}}", - "destinationPortRange": "53", - "direction": "Outbound", - "priority": 1200, - "protocol": "*", - "sourceAddressPrefix": 
"{{sre.network.vnet.subnets.databases.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowMonitoringToolsOutbound", - "access": "Allow", - "description": "Allow connections to local monitoring tools", - "destinationAddressPrefix": "{{shm.network.vnet.subnets.monitoring.cidr}}", - "destinationPortRange": "443", - "direction": "Outbound", - "priority": 1500, - "protocol": "TCP", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.databases.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalNTPOutbound", - "access": "Allow", - "description": "Allow outbound connections to external NTP servers", - "destinationAddressPrefix": [ - {{#shm.time.ntp.serverAddresses}} - "{{.}}", - {{/shm.time.ntp.serverAddresses}} - ], - "destinationPortRange": "123", - "direction": "Outbound", - "priority": 3000, - "protocol": "UDP", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.databases.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalClamAVOutbound", - "access": "Allow", - "description": "Allow outbound connections to external ClamAV servers", - "destinationAddressPrefix": ["104.16.218.84", "104.16.219.84"], - "destinationPortRange": ["53", "443"], - "direction": "Outbound", - "priority": 3300, - "protocol": "TCP", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.databases.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowWindowsUpdatesOutbound", - "access": "Allow", - "description": "Allow outbound connections to Windows update servers", - "destinationAddressPrefix": [ - {{#shm.monitoring.updateServers.externalIpAddresses.windows}} - "{{.}}", - {{/shm.monitoring.updateServers.externalIpAddresses.windows}} - ], - "destinationPortRange": ["80", "443"], - "direction": "Outbound", - "priority": 3700, - "protocol": "TCP", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.databases.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowAzureAutomationOutbound", - "access": "Allow", - "description": "Allow outbound connections to Azure automation servers", - "destinationAddressPrefix": "GuestAndHybridManagement", - "destinationPortRange": ["443"], - "direction": "Outbound", - "priority": 3800, - "protocol": "TCP", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.databases.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherOutbound", - "access": "Deny", - "description": "Deny all other outbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Outbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - } -] diff --git a/deployment/secure_research_environment/network_rules/sre-nsg-rules-deployment.json b/deployment/secure_research_environment/network_rules/sre-nsg-rules-deployment.json deleted file mode 100644 index 9cf0bffef9..0000000000 --- a/deployment/secure_research_environment/network_rules/sre-nsg-rules-deployment.json +++ /dev/null @@ -1,62 +0,0 @@ -[ - { - "name": "DenyAdminVPNInbound", - "access": "Deny", - "description": "Deny connections from admin P2S VPN", - "destinationAddressPrefix": "{{sre.network.vnet.subnets.deployment.cidr}}", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 2000, - "protocol": "*", - "sourceAddressPrefix": "{{shm.network.vpn.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherInbound", - "access": "Deny", - "description": "Deny all other inbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 4096, - 
"protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - }, - { - "name": "AllowLinuxUpdatesOutbound", - "access": "Allow", - "description": "Allow outbound connections to Linux update servers", - "destinationAddressPrefix": "{{shm.network.vnet.subnets.updateServers.cidr}}", - "destinationPortRange": ["8000"], - "direction": "Outbound", - "priority": 900, - "protocol": "TCP", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.deployment.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalInternetOutbound", - "access": "Allow", - "description": "Allow outbound connections to internet", - "destinationAddressPrefix": "Internet", - "destinationPortRange": "*", - "direction": "Outbound", - "priority": 3900, - "protocol": "*", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.deployment.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherOutbound", - "access": "Deny", - "description": "Deny all other outbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Outbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - } -] diff --git a/deployment/secure_research_environment/network_rules/sre-nsg-rules-guacamole.json b/deployment/secure_research_environment/network_rules/sre-nsg-rules-guacamole.json deleted file mode 100644 index d43f3015ff..0000000000 --- a/deployment/secure_research_environment/network_rules/sre-nsg-rules-guacamole.json +++ /dev/null @@ -1,178 +0,0 @@ -[ - { - "name": "DenyAdminVPNInbound", - "access": "Deny", - "description": "Deny connections from admin P2S VPN", - "destinationAddressPrefix": "{{sre.network.vnet.subnets.remoteDesktop.cidr}}", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 2000, - "protocol": "*", - "sourceAddressPrefix": "{{shm.network.vpn.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowUsersApprovedHttpsInbound", - "access": "Allow", - "description": "Allow inbound http(s) connections from clients to RDS server. 
Note that http requests will be upgraded.", - "destinationAddressPrefix": "{{sre.remoteDesktop.guacamole.ip}}", - "destinationPortRange": ["80", "443"], - "direction": "Inbound", - "priority": 2200, - "protocol": "TCP", - "sourceAddressPrefix": [ - {{#sre.remoteDesktop.networkRules.allowedSources}} - "{{.}}", - {{/sre.remoteDesktop.networkRules.allowedSources}} - ], - "sourcePortRange": "*" - }, - { - "name": "AllowExternalSslLabsHttpsInbound", - "access": "Allow", - "description": "Allow inbound https connections from ssllabs.com for SSL quality reporting", - "destinationAddressPrefix": "{{sre.remoteDesktop.guacamole.ip}}", - "destinationPortRange": "443", - "direction": "Inbound", - "priority": 3400, - "protocol": "TCP", - "sourceAddressPrefix": "64.41.200.0/24", - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherInbound", - "access": "Deny", - "description": "Deny all other inbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - }, - { - "name": "AllowSRDOutbound", - "access": "Allow", - "description": "Allow RDP connection to SRDs from Guacamole", - "destinationAddressPrefix": "{{sre.network.vnet.subnets.compute.cidr}}", - "destinationPortRange": ["22", "3389"], - "direction": "Outbound", - "priority": 800, - "protocol": "*", - "sourceAddressPrefix": "{{sre.remoteDesktop.guacamole.ip}}", - "sourcePortRange": "*" - }, - { - "name": "AllowLinuxUpdatesOutbound", - "access": "Allow", - "description": "Allow outbound connections to Linux update servers", - "destinationAddressPrefix": "{{shm.network.vnet.subnets.updateServers.cidr}}", - "destinationPortRange": ["8000"], - "direction": "Outbound", - "priority": 900, - "protocol": "TCP", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.remoteDesktop.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowLDAPOutbound", - "access": "Allow", - "description": "Allow LDAP queries to the SHM DC", - "destinationAddressPrefix": "{{shm.network.vnet.subnets.identity.cidr}}", - "destinationPortRange": ["389", "636"], - "direction": "Outbound", - "priority": 1000, - "protocol": "TCP", - "sourceAddressPrefix": "{{sre.remoteDesktop.guacamole.ip}}", - "sourcePortRange": "*" - }, - { - "name": "AllowDNSOutbound", - "access": "Allow", - "description": "Allow DNS requests to SHM", - "destinationAddressPrefix": "{{shm.network.vnet.subnets.identity.cidr}}", - "destinationPortRange": "53", - "direction": "Outbound", - "priority": 1200, - "protocol": "*", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.remoteDesktop.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowMonitoringToolsOutbound", - "access": "Allow", - "description": "Allow connections to local monitoring tools", - "destinationAddressPrefix": "{{shm.network.vnet.subnets.monitoring.cidr}}", - "destinationPortRange": "443", - "direction": "Outbound", - "priority": 1500, - "protocol": "TCP", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.remoteDesktop.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalNTPOutbound", - "access": "Allow", - "description": "Allow outbound connections to external NTP servers", - "destinationAddressPrefix": [ - {{#shm.time.ntp.serverAddresses}} - "{{.}}", - {{/shm.time.ntp.serverAddresses}} - ], - "destinationPortRange": 123, - "direction": "Outbound", - "priority": 3000, - "protocol": "UDP", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.remoteDesktop.cidr}}", - 
"sourcePortRange": "*" - }, - { - "name": "AllowExternalClamAVOutbound", - "access": "Allow", - "description": "Allow outbound connections to external ClamAV servers", - "destinationAddressPrefix": ["104.16.218.84", "104.16.219.84"], - "destinationPortRange": ["53", "443"], - "direction": "Outbound", - "priority": 3300, - "protocol": "TCP", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.remoteDesktop.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowAzureAutomationOutbound", - "access": "Allow", - "description": "Allow outbound connections to Azure automation servers", - "destinationAddressPrefix": "GuestAndHybridManagement", - "destinationPortRange": ["443"], - "direction": "Outbound", - "priority": 3800, - "protocol": "TCP", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.remoteDesktop.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalInternetOutbound", - "access": "Allow", - "description": "Allow outbound connections to internet", - "destinationAddressPrefix": "Internet", - "destinationPortRange": "*", - "direction": "Outbound", - "priority": 3900, - "protocol": "*", - "sourceAddressPrefix": "{{sre.remoteDesktop.guacamole.ip}}", - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherOutbound", - "access": "Deny", - "description": "Deny all other outbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Outbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - } -] diff --git a/deployment/secure_research_environment/network_rules/sre-nsg-rules-webapps.json b/deployment/secure_research_environment/network_rules/sre-nsg-rules-webapps.json deleted file mode 100644 index 013d76b71e..0000000000 --- a/deployment/secure_research_environment/network_rules/sre-nsg-rules-webapps.json +++ /dev/null @@ -1,150 +0,0 @@ -[ - { - "name": "AllowSRESubnetsInbound", - "access": "Allow", - "description": "Allow inbound http(s) connections from SRE subnets", - "destinationAddressPrefix": "{{sre.network.vnet.subnets.webapps.cidr}}", - "destinationPortRange": ["80", "443"], - "direction": "inbound", - "priority": 600, - "protocol": "TCP", - "sourceAddressPrefix": ["{{sre.network.vnet.subnets.compute.cidr}}", "{{sre.network.vnet.subnets.remoteDesktop.cidr}}"], - "sourcePortRange": "*" - }, - { - "name": "AllowLinuxUpdatesOutbound", - "access": "Allow", - "description": "Allow outbound connections to Linux update servers", - "destinationAddressPrefix": "{{shm.network.vnet.subnets.updateServers.cidr}}", - "destinationPortRange": ["8000"], - "direction": "Outbound", - "priority": 900, - "protocol": "TCP", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.webapps.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowLDAPClientsOutbound", - "access": "Allow", - "description": "Allow LDAP client requests over UDP and TCP.", - "destinationAddressPrefix": "{{shm.network.vnet.subnets.identity.cidr}}", - "destinationPortRange": ["389"], - "direction": "Outbound", - "priority": 1000, - "protocol": "*", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.webapps.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "DenyAdminVPNInbound", - "access": "Deny", - "description": "Deny connections from admin P2S VPN", - "destinationAddressPrefix": "{{sre.network.vnet.subnets.webapps.cidr}}", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 2000, - "protocol": "*", - "sourceAddressPrefix": "{{shm.network.vpn.cidr}}", - "sourcePortRange": "*" - }, - { - "name": 
"DenyAllOtherInbound", - "access": "Deny", - "description": "Deny all other inbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Inbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - }, - { - "name": "AllowDNSOutbound", - "access": "Allow", - "description": "Allow DNS requests to SHM", - "destinationAddressPrefix": "{{shm.network.vnet.subnets.identity.cidr}}", - "destinationPortRange": "53", - "direction": "Outbound", - "priority": 1200, - "protocol": "*", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.webapps.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowMonitoringToolsOutbound", - "access": "Allow", - "description": "Allow connections to local monitoring tools", - "destinationAddressPrefix": "{{shm.network.vnet.subnets.monitoring.cidr}}", - "destinationPortRange": "443", - "direction": "Outbound", - "priority": 1500, - "protocol": "TCP", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.webapps.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalNTPOutbound", - "access": "Allow", - "description": "Allow outbound connections to external NTP servers", - "destinationAddressPrefix": [ - {{#shm.time.ntp.serverAddresses}} - "{{.}}", - {{/shm.time.ntp.serverAddresses}} - ], - "destinationPortRange": "123", - "direction": "Outbound", - "priority": 3000, - "protocol": "UDP", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.webapps.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowExternalClamAVOutbound", - "access": "Allow", - "description": "Allow outbound connections to external ClamAV servers", - "destinationAddressPrefix": ["104.16.218.84", "104.16.219.84"], - "destinationPortRange": ["53", "443"], - "direction": "Outbound", - "priority": 3300, - "protocol": "TCP", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.webapps.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "AllowAzureAutomationOutbound", - "access": "Allow", - "description": "Allow outbound connections to Azure automation servers", - "destinationAddressPrefix": "GuestAndHybridManagement", - "destinationPortRange": ["443"], - "direction": "Outbound", - "priority": 3800, - "protocol": "TCP", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.webapps.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "{{sre.remoteDesktop.networkRules.outboundInternet}}ExternalInternetOutbound", - "access": "{{sre.remoteDesktop.networkRules.outboundInternet}}", - "description": "{{sre.remoteDesktop.networkRules.outboundInternet}} outbound connections to internet", - "destinationAddressPrefix": "Internet", - "destinationPortRange": "*", - "direction": "Outbound", - "priority": 3900, - "protocol": "*", - "sourceAddressPrefix": "{{sre.network.vnet.subnets.webapps.cidr}}", - "sourcePortRange": "*" - }, - { - "name": "DenyAllOtherOutbound", - "access": "Deny", - "description": "Deny all other outbound traffic.", - "destinationAddressPrefix": "*", - "destinationPortRange": "*", - "direction": "Outbound", - "priority": 4096, - "protocol": "*", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - } -] diff --git a/deployment/secure_research_environment/remote/configure_shm_dc/scripts/Create_DNS_Zone_Remote.ps1 b/deployment/secure_research_environment/remote/configure_shm_dc/scripts/Create_DNS_Zone_Remote.ps1 deleted file mode 100644 index b86106c7c0..0000000000 --- a/deployment/secure_research_environment/remote/configure_shm_dc/scripts/Create_DNS_Zone_Remote.ps1 +++ /dev/null @@ -1,24 +0,0 @@ 
-# Don't make parameters mandatory as if there is any issue binding them, the script will prompt for them -# and remote execution will stall waiting for the non-present user to enter the missing parameter on the -# command line. This takes up to 90 minutes to timeout, though you can try running resetState.cmd in -# C:\Packages\Plugins\Microsoft.CPlat.Core.RunCommandWindows\1.1.0 on the remote VM to cancel a stalled -# job, but this does not seem to have an immediate effect -# For details, see https://docs.microsoft.com/en-gb/azure/virtual-machines/windows/run-command -param( - [Parameter(Mandatory = $false, HelpMessage = "FQDN for the SRE")] - [string]$SreFqdn -) - -# Check whether the forward lookup zone exists, otherwise create it -# ----------------------------------------------------------------- -if (Get-DnsServerZone -Name $SreFqdn -ErrorAction SilentlyContinue) { - Write-Output "DNS zone for $SreFqdn already exists" -} else { - Write-Output " [ ] Creating DNS zone for '$SreFqdn'..." - Add-DnsServerPrimaryZone -Name $SreFqdn -ReplicationScope "Forest" -PassThru - if ($?) { - Write-Output " [o] Successfully created DNS zone for '$SreFqdn'" - } else { - Write-Output " [x] Failed to create DNS zone for '$SreFqdn'!" - } -} diff --git a/deployment/secure_research_environment/remote/configure_shm_dc/scripts/Create_New_SRE_User_Service_Accounts_Remote.ps1 b/deployment/secure_research_environment/remote/configure_shm_dc/scripts/Create_New_SRE_User_Service_Accounts_Remote.ps1 deleted file mode 100644 index c58f666277..0000000000 --- a/deployment/secure_research_environment/remote/configure_shm_dc/scripts/Create_New_SRE_User_Service_Accounts_Remote.ps1 +++ /dev/null @@ -1,146 +0,0 @@ -# Don't make parameters mandatory as if there is any issue binding them, the script will prompt for them -# and remote execution will stall waiting for the non-present user to enter the missing parameter on the -# command line.
This takes up to 90 minutes to timeout, though you can try running resetState.cmd in -# C:\Packages\Plugins\Microsoft.CPlat.Core.RunCommandWindows\1.1.0 on the remote VM to cancel a stalled -# job, but this does not seem to have an immediate effect -# For details, see https://docs.microsoft.com/en-gb/azure/virtual-machines/windows/run-command -param( - [Parameter(HelpMessage = "Name of security group that will contain SHM sysadmins")] - [ValidateNotNullOrEmpty()] - [String]$ShmSystemAdministratorSgName, - [Parameter(HelpMessage = "Base64-encoded group details")] - [ValidateNotNullOrEmpty()] - [String]$GroupsB64, - [Parameter(HelpMessage = "LDAP OU that SRE security groups belong to")] - [ValidateNotNullOrEmpty()] - [String]$SecurityOuPath, - [Parameter(HelpMessage = "LDAP OU that SRE service accounts belong to")] - [ValidateNotNullOrEmpty()] - [String]$ServiceOuPath, - [Parameter(HelpMessage = "Base64-encoded service user details")] - [ValidateNotNullOrEmpty()] - [String]$ServiceUsersB64 -) - -# Create a new security group -function New-ActiveDirectoryGroup { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of the group to be created")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "Description of the group to be created.")] - [string]$Description, - [Parameter(Mandatory = $true, HelpMessage = "Path that the group will be created under.")] - [string]$Path, - [Parameter(Mandatory = $true, HelpMessage = "Group category.")] - [string]$GroupCategory, - [Parameter(Mandatory = $true, HelpMessage = "Group scope.")] - [string]$GroupScope - ) - if (Get-ADGroup -Filter "Name -eq '$Name'") { - Write-Output " [o] Group '$Name' already exists" - } else { - Write-Output " [ ] Creating group '$Name' in OU '$Path'..." - New-ADGroup -Description $Description ` - -GroupCategory $GroupCategory ` - -GroupScope $GroupScope ` - -Name "$Name" ` - -Path $Path - if ($?) { - Write-Output " [o] Group '$Name' created" - } else { - Write-Output " [x] Failed to create group '$Name'!" - } - } -} - -# Create a new user -function New-ActiveDirectoryUser { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of the user to be created.")] - [string]$Name, - [Parameter(Mandatory = $true, HelpMessage = "User password as a secure string.")] - [securestring]$PasswordSecureString, - [Parameter(Mandatory = $true, HelpMessage = "Path that the user will be created under.")] - [string]$Path, - [Parameter(Mandatory = $true, HelpMessage = "Security Account Manager (SAM) account name of the user. Maximum 20 characters for backwards compatibility.")] - [string]$SamAccountName - ) - if (Get-ADUser -Filter "SamAccountName -eq '$SamAccountName'") { - Write-Output " [o] User '$Name' ('$SamAccountName') already exists" - } else { - $UserPrincipalName = "${SamAccountName}@${shmFqdn}" - Write-Output " [ ] Creating user '$Name' ($SamAccountName)..." - New-ADUser -AccountPassword $PasswordSecureString ` - -Description "$Name" ` - -DisplayName "$Name" ` - -Enabled $true ` - -Name "$Name" ` - -PasswordNeverExpires $true ` - -Path $path ` - -UserPrincipalName $UserPrincipalName ` - -SamAccountName $SamAccountName - if ($?) { - Write-Output " [o] User '$Name' ($SamAccountName) created" - } else { - Write-Output " [x] Failed to create user '$Name' ($SamAccountName)!"
- } - } -} - - -# Add a user to a group -function Add-ActiveDirectoryAccountToGroup { - param( - [Parameter(Mandatory = $true, HelpMessage = "Name of the group that the user or group will be added to.")] - [string]$GroupName, - [Parameter(Mandatory = $true, HelpMessage = "Security Account Manager (SAM) account name of the Active Directory account.")] - [string]$SamAccountName - ) - $Account = Get-ADObject -Filter "SamAccountName -eq '$SamAccountName'" - # Note that Get-ADGroupMember suffers from this bug: https://docs.microsoft.com/en-us/troubleshoot/windows-server/identity/get-adgroupmember-error-remote-forest-members - if (Get-ADGroup -Identity "$GroupName" -Properties Members | Select-Object -ExpandProperty Members | Where-Object { $_ -eq $Account.DistinguishedName }) { - Write-Output " [o] Account '$SamAccountName' is already a member of '$GroupName'" - } else { - Write-Output " [ ] Adding '$SamAccountName' to group '$GroupName'..." - Add-ADGroupMember -Identity "$GroupName" -Members $Account.ObjectGUID - if ($?) { - Write-Output " [o] Account '$SamAccountName' was added to '$GroupName'" - } else { - Write-Output " [x] Account '$SamAccountName' could not be added to '$GroupName'!" - } - } -} - - -# Unserialise JSON and read into PSCustomObject -# --------------------------------------------- -$Groups = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($GroupsB64)) | ConvertFrom-Json -$ServiceUsers = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($ServiceUsersB64)) | ConvertFrom-Json - - -# Create SRE security groups -# -------------------------- -foreach ($Group in $Groups.PSObject.Members) { - if ($Group.TypeNameOfValue -ne "System.Management.Automation.PSCustomObject") { continue } - New-ActiveDirectoryGroup -Description $Group.Value.description ` - -GroupCategory "Security" ` - -GroupScope "Global" ` - -Name $Group.Value.name ` - -Path $SecurityOuPath -} - - -# Add SHM sysadmins group to the SRE sysadmins group -# -------------------------------------------------- -Add-ActiveDirectoryAccountToGroup -SamAccountName "$ShmSystemAdministratorSgName" -GroupName $Groups.systemAdministrators.name - - -# Create SRE service accounts -# --------------------------- -foreach ($ServiceUser in $ServiceUsers.PSObject.Members) { - if ($ServiceUser.TypeNameOfValue -ne "System.Management.Automation.PSCustomObject") { continue } - New-ActiveDirectoryUser -Name "$($ServiceUser.Value.name)" ` - -PasswordSecureString (ConvertTo-SecureString $ServiceUser.Value.password -AsPlainText -Force) ` - -Path $ServiceOuPath ` - -SamAccountName "$($ServiceUser.Value.samAccountName)" -} diff --git a/deployment/secure_research_environment/remote/configure_shm_dc/scripts/Remove_DNS_Entries_Remote.ps1 b/deployment/secure_research_environment/remote/configure_shm_dc/scripts/Remove_DNS_Entries_Remote.ps1 deleted file mode 100644 index cade895b64..0000000000 --- a/deployment/secure_research_environment/remote/configure_shm_dc/scripts/Remove_DNS_Entries_Remote.ps1 +++ /dev/null @@ -1,67 +0,0 @@ -# Don't make parameters mandatory as if there is any issue binding them, the script will prompt for them -# and remote execution will stall waiting for the non-present user to enter the missing parameter on the -# command line. 
This take up to 90 minutes to timeout, though you can try running resetState.cmd in -# C:\Packages\Plugins\Microsoft.CPlat.Core.RunCommandWindows\1.1.0 on the remote VM to cancel a stalled -# job, but this does not seem to have an immediate effect -# For details, see https://docs.microsoft.com/en-gb/azure/virtual-machines/windows/run-command -param( - [Parameter(Mandatory = $false, HelpMessage = "FQDN for the SHM")] - [string]$ShmFqdn, - [Parameter(Mandatory = $false, HelpMessage = "FQDN for the SRE")] - [string]$SreFqdn, - [Parameter(Mandatory = $false, HelpMessage = "SRE ID")] - [string]$SreId, - [Parameter(Mandatory = $false, HelpMessage = "Base-64 encoded list of private DNS zone name-fragments to remove")] - [string]$PrivateEndpointFragmentsB64 -) - -# Deserialise Base-64 encoded variables -# ------------------------------------- -$PrivateEndpointFragments = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($PrivateEndpointFragmentsB64)) | ConvertFrom-Json - - -# Remove any records for domain-joined SRE VMs in the SHM zone -# ------------------------------------------------------------ -Write-Output "Removing SRE DNS records..." -foreach ($dnsRecord in (Get-DnsServerResourceRecord -ZoneName "$ShmFqdn" | Where-Object { $_.HostName -like "*$SreId" })) { - $dnsRecord | Remove-DnsServerResourceRecord -ZoneName "$ShmFqdn" -Force - if ($?) { - Write-Output " [o] Successfully removed DNS record '$($dnsRecord.HostName)'" - } else { - Write-Output " [x] Failed to remove DNS record '$($dnsRecord.HostName)'!" - } -} - - -# Remove the forward lookup zone if it exists -# ------------------------------------------- -Write-Output "Removing SRE DNS zone..." -if (Get-DnsServerZone -Name $SreFqdn -ErrorAction SilentlyContinue) { - Write-Output " [ ] Removing DNS zone for '$SreFqdn'..." - Remove-DnsServerZone -Name $SreFqdn -Force - if ($?) { - Write-Output " [o] Successfully removed DNS zone for '$SreFqdn'" - } else { - Write-Output " [x] Failed to removed DNS zone for '$SreFqdn'!" - } -} - - -# Remove private endpoint DNS Zone -# -------------------------------- -foreach ($PrivateEndpointFragment in $PrivateEndpointFragments) { - Write-Output " [ ] Ensuring that DNS zones matching '$PrivateEndpointFragment' are removed" - $DnsZones = Get-DnsServerZone | Where-Object { $_.ZoneName -like "${PrivateEndpointFragment}*.core.windows.net" } - if ($DnsZones) { - foreach ($DnsZone in $DnsZones) { - try { - $DnsZone | Remove-DnsServerZone -Force - Write-Output " [o] Successfully removed '$($DnsZone.ZoneName)' DNS zone" - } catch [System.ArgumentException] { - Write-Output " [x] Failed to remove '$($DnsZone.ZoneName)' DNS zone!" - } - } - } else { - Write-Output " [o] No DNS zones matching '$PrivateEndpointFragment' were found" - } -} diff --git a/deployment/secure_research_environment/remote/configure_shm_dc/scripts/Remove_Users_And_Groups_Remote.ps1 b/deployment/secure_research_environment/remote/configure_shm_dc/scripts/Remove_Users_And_Groups_Remote.ps1 deleted file mode 100644 index 248d7fcedb..0000000000 --- a/deployment/secure_research_environment/remote/configure_shm_dc/scripts/Remove_Users_And_Groups_Remote.ps1 +++ /dev/null @@ -1,65 +0,0 @@ -# Don't make parameters mandatory as if there is any issue binding them, the script will prompt for them -# and remote execution will stall waiting for the non-present user to enter the missing parameter on the -# command line. 
This take up to 90 minutes to timeout, though you can try running resetState.cmd in -# C:\Packages\Plugins\Microsoft.CPlat.Core.RunCommandWindows\1.1.0 on the remote VM to cancel a stalled -# job, but this does not seem to have an immediate effect -# For details, see https://docs.microsoft.com/en-gb/azure/virtual-machines/windows/run-command -param( - [Parameter(Mandatory = $false, HelpMessage = "Base64-encoded list of SRE groups")] - [string]$groupNamesB64, - [Parameter(Mandatory = $false, HelpMessage = "Base64-encoded list of SRE users")] - [string]$userNamesB64, - [Parameter(Mandatory = $false, HelpMessage = "Base64-encoded list of SRE computers")] - [string]$computerNamePatternsB64 -) - -# Unserialise JSON and read into PSCustomObject -$groupNames = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($groupNamesB64)) | ConvertFrom-Json -$userNames = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($userNamesB64)) | ConvertFrom-Json -$computerNamePatterns = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($computerNamePatternsB64)) | ConvertFrom-Json - -# Remove users -Write-Output "Removing SRE users..." -foreach ($samAccountName in $userNames) { - if (Get-ADUser -Filter "SamAccountName -eq '$samAccountName'") { - Remove-ADUser (Get-ADUser $samAccountName) -Confirm:$False - if ($?) { - Write-Output " [o] Successfully removed user '$samAccountName'" - } else { - Write-Output " [x] Failed to remove user '$samAccountName'!" - exit 1 - } - } else { - Write-Output "No user named '$samAccountName' exists" - } -} - -# Remove computers -Write-Output "Removing SRE computers..." -foreach ($computerNamePattern in $computerNamePatterns) { - foreach ($computer in $(Get-ADComputer -Filter "Name -like '$computerNamePattern'")) { - $computer | Remove-ADObject -Recursive -Confirm:$False - if ($?) { - Write-Output " [o] Successfully removed computer '$($computer.Name)'" - } else { - Write-Output " [x] Failed to remove computer '$($computer.Name)'!" - exit 1 - } - } -} - -# Remove groups -Write-Output "Removing SRE groups..." -foreach ($groupName in $groupNames) { - if (Get-ADGroup -Filter "Name -eq '$groupName'") { - Remove-ADGroup (Get-ADGroup $groupName) -Confirm:$False - if ($?) { - Write-Output " [o] Successfully removed group '$groupName'" - } else { - Write-Output " [x] Failed to remove group '$groupName'!" - exit 1 - } - } else { - Write-Output "No group named '$groupName' exists" - } -} diff --git a/deployment/secure_research_environment/remote/create_databases/scripts/Create_Postgres_Service_Principal.ps1 b/deployment/secure_research_environment/remote/create_databases/scripts/Create_Postgres_Service_Principal.ps1 deleted file mode 100644 index 7b15df0861..0000000000 --- a/deployment/secure_research_environment/remote/create_databases/scripts/Create_Postgres_Service_Principal.ps1 +++ /dev/null @@ -1,42 +0,0 @@ -# Don't make parameters mandatory as if there is any issue binding them, the script will prompt for them -# and remote execution will stall waiting for the non-present user to enter the missing parameter on the -# command line. 
This take up to 90 minutes to timeout, though you can try running resetState.cmd in -# C:\Packages\Plugins\Microsoft.CPlat.Core.RunCommandWindows\1.1.0 on the remote VM to cancel a stalled -# job, but this does not seem to have an immediate effect -# For details, see https://docs.microsoft.com/en-gb/azure/virtual-machines/windows/run-command -param( - [Parameter(HelpMessage = "Hostname for the VM")] - [string]$Hostname, - [Parameter(HelpMessage = "Name/description for the service account user")] - [string]$Name, - [Parameter(HelpMessage = "sAMAccountName for the service account user (must be unique in the Active Directory)")] - [string]$SamAccountName, - [Parameter(HelpMessage = "FQDN for the SHM")] - [string]$ShmFqdn, - [Parameter(HelpMessage = "Name of the service we are registering against")] - [string]$ServiceName = "POSTGRES" -) - -# Initialise SPN and UPN. NB. they must have this *exact* name for authentication to work -$servicePrincipalName = "${ServiceName}/${Hostname}.$($ShmFqdn.ToLower())" -$userPrincipalName = "${servicePrincipalName}@$($ShmFqdn.ToUpper())" - -# Ensure that the service account user exists in the AD -Write-Output " [ ] Ensuring that account '$Name' ($SamAccountName) exists" -$adUser = Get-ADUser -Filter "SamAccountName -eq '$SamAccountName'" -if ($? -And $adUser) { - Write-Output " [o] Found user '$Name' ($SamAccountName)" -} else { - Write-Output " [x] Failed to find user '$Name' ($SamAccountName)!" - exit 1 -} - -# Set the service principal details -Write-Output " [ ] Ensuring that '$Name' ($SamAccountName) is registered as a service principal" -$adUser | Set-ADUser -ServicePrincipalNames @{Replace = $servicePrincipalName } -UserPrincipalName "$userPrincipalName" -if ($?) { - Write-Output " [o] Registered '$Name' ($SamAccountName) as '$servicePrincipalName'" -} else { - Write-Output " [x] Failed to register '$Name' ($SamAccountName) as '$servicePrincipalName'!" - exit 1 -} diff --git a/deployment/secure_research_environment/remote/create_databases/scripts/Lockdown_Sql_Server.ps1 b/deployment/secure_research_environment/remote/create_databases/scripts/Lockdown_Sql_Server.ps1 deleted file mode 100644 index 101c9f2f7b..0000000000 --- a/deployment/secure_research_environment/remote/create_databases/scripts/Lockdown_Sql_Server.ps1 +++ /dev/null @@ -1,216 +0,0 @@ -# Don't make parameters mandatory as if there is any issue binding them, the script will prompt for them -# and remote execution will stall waiting for the non-present user to enter the missing parameter on the -# command line. 
This take up to 90 minutes to timeout, though you can try running resetState.cmd in -# C:\Packages\Plugins\Microsoft.CPlat.Core.RunCommandWindows\1.1.0 on the remote VM to cancel a stalled -# job, but this does not seem to have an immediate effect -# For details, see https://docs.microsoft.com/en-gb/azure/virtual-machines/windows/run-command -param( - [Parameter(Mandatory = $true, HelpMessage = "Domain-qualified name for the SRE-level data administrators group")] - [string]$DataAdminGroup, - [Parameter(Mandatory = $true, HelpMessage = "Base-64 encoded password for SQL AuthUpdate User")] - [string]$DbAdminPasswordB64, - [Parameter(Mandatory = $true, HelpMessage = "Name of SQL AuthUpdate User")] - [string]$DbAdminUsername, - [Parameter(Mandatory = $true, HelpMessage = "Whether SSIS should be enabled")] - [string]$EnableSSIS, # it is not possible to pass a bool through the Invoke-RemoteScript interface - [Parameter(Mandatory = $true, HelpMessage = "Domain-qualified name for the SRE-level research users group")] - [string]$ResearchUsersGroup, - [Parameter(Mandatory = $true, HelpMessage = "Server lockdown command")] - [string]$ServerLockdownCommandB64, - [Parameter(Mandatory = $true, HelpMessage = "Domain-qualified name for the SRE-level system administrators group")] - [string]$SysAdminGroup, - [Parameter(Mandatory = $true, HelpMessage = "Name of local admin user on this machine")] - [string]$VmAdminUsername -) - -Import-Module SqlServer -ErrorAction Stop - - -# Create SQL admin credentials object -# ----------------------------------- -$serverName = $(hostname) -$dbAdminPassword = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($DbAdminPasswordB64)) -$secureDbAdminPassword = (ConvertTo-SecureString $dbAdminPassword -AsPlainText -Force) -$sqlAdminCredentials = New-Object System.Management.Automation.PSCredential($DbAdminUsername, $secureDbAdminPassword) -$connectionTimeoutInSeconds = 5 -$EnableSSIS = [System.Convert]::ToBoolean($EnableSSIS) - - -# Ensure that SSIS is enabled/disabled as requested -# ------------------------------------------------- -if ($EnableSSIS) { - Write-Output "Ensuring that SSIS services (SSISTELEMETRY and MsDtsServer) are enabled on: '$serverName'" - Get-Service SSISTELEMETRY160, MsDtsServer160 | Start-Service -PassThru | Set-Service -StartupType Automatic -} else { - Write-Output "Ensuring that SSIS services (SSISTELEMETRY and MsDtsServer) are disabled on: '$serverName'" - Get-Service SSISTELEMETRY160, MsDtsServer160 | Stop-Service -PassThru | Set-Service -StartupType Disabled -} -if ($?) { - Write-Output " [o] Successfully updated SSIS services state on: '$serverName'" -} else { - Write-Output " [x] Failed to updated SSIS services state on: '$serverName'!" - exit 1 -} - - -# Disable unused SQL Server services -# ---------------------------------- -Write-Output "Disable unused SQL server services on: '$serverName'..." -Get-Service SSASTELEMETRY, MSSQLServerOlapService, SQLBrowser | Stop-Service -PassThru | Set-Service -StartupType disabled -if ($?) { - Write-Output " [o] Successfully disabled unused services (SSASTELEMETRY, MSSQLServerOlapService, SQLBrowser) on: '$serverName'" -} else { - Write-Output " [x] Failed to disable unused SQL server services on: '$serverName'!" - exit 1 -} - - -# Check whether the auth update user exists and has admin rights -# -------------------------------------------------------------- -Write-Output "Checking that the $DbAdminUsername user has admin permissions on: '$serverName'..." 
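# The check that follows is two-fold: Get-SqlLogin confirms that the AuthUpdate login still exists,
# while an SMO Server object (with LoginSecure disabled so that SQL authentication is used instead of
# the Windows identity running this script) enumerates the members of every server role whose name
# contains 'admin' to confirm that the login still holds admin rights.
# A narrower alternative sketch that checks only the sysadmin role, assuming the SqlServer module
# imported above:
#   $isAdmin = (Invoke-SqlCmd -ServerInstance $serverName -Credential $sqlAdminCredentials `
#                  -Query "SELECT IS_SRVROLEMEMBER('sysadmin', '$DbAdminUsername') AS IsAdmin" `
#                  -TrustServerCertificate).IsAdmin -eq 1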
-$loginExists = Get-SqlLogin -ServerInstance $serverName -Credential $sqlAdminCredentials -ErrorAction SilentlyContinue -ErrorVariable operationFailed | Where-Object { $_.Name -eq $DbAdminUsername } -$smo = (New-Object Microsoft.SqlServer.Management.Smo.Server $serverName) -$smo.ConnectionContext.LoginSecure = $false # disable the default use of Windows credentials -$smo.ConnectionContext.set_Login($sqlAdminCredentials.UserName) -$smo.ConnectionContext.set_SecurePassword($sqlAdminCredentials.Password) -$isAdmin = $smo.Roles | Where-Object { $_.Name -Like "*admin*" } | Where-Object { $_.EnumServerRoleMembers() -Contains $DbAdminUsername } - -# If the DbAdminUsername is not found then something has gone wrong -if ($operationFailed -Or (-Not $loginExists)) { - Write-Output " [x] $DbAdminUsername does not exist on: '$serverName'!" - exit 1 - -# If the DbAdminUsername is not an admin, then we are not able to do anything else. -# Hopefully this is because lockdown has already been run. -} elseif (-Not $isAdmin) { - Write-Output " [o] $DbAdminUsername is not an admin on: '$serverName'. Have you already locked this server down?" - -# ... otherwise we continue with the server lockdown -} else { - Write-Output " [o] $DbAdminUsername has admin privileges on: '$serverName'" - - # Give the configured domain groups login access to the SQL Server - # ---------------------------------------------------------------- - foreach ($domainGroup in @($SysAdminGroup, $DataAdminGroup, $ResearchUsersGroup)) { - Write-Output "Ensuring that '$domainGroup' has SQL login access to: '$serverName'..." - if (Get-SqlLogin -ServerInstance $serverName -Credential $sqlAdminCredentials | Where-Object { $_.Name -eq $domainGroup } ) { - Write-Output " [o] '$domainGroup' already has SQL login access to: '$serverName'" - } else { - $null = Add-SqlLogin -ConnectionTimeout $connectionTimeoutInSeconds -GrantConnectSql -ServerInstance $serverName -LoginName $domainGroup -LoginType "WindowsGroup" -Credential $sqlAdminCredentials -ErrorAction SilentlyContinue -ErrorVariable operationFailed - if ($? -And -Not $operationFailed) { - Write-Output " [o] Successfully gave '$domainGroup' SQL login access to: '$serverName'" - } else { - Write-Output " [x] Failed to give '$domainGroup' SQL login access to: '$serverName'!" - exit 1 - } - } - # Create a DB user for each login group - Write-Output "Ensuring that an SQL user exists for '$domainGroup' on: '$serverName'..." - $sqlCommand = "IF NOT EXISTS(SELECT * FROM sys.database_principals WHERE name = '$domainGroup') CREATE USER [$domainGroup] FOR LOGIN [$domainGroup];" - Invoke-SqlCmd -ServerInstance $serverName -Credential $sqlAdminCredentials -QueryTimeout $connectionTimeoutInSeconds -Query "$sqlCommand" -TrustServerCertificate -ErrorAction SilentlyContinue -ErrorVariable sqlErrorMessage -OutputSqlErrors $true - if ($? -And -Not $sqlErrorMessage) { - Write-Output " [o] Ensured that '$domainGroup' user exists on: '$serverName'" - Start-Sleep -s 10 # allow time for the database action to complete - } else { - Write-Output " [x] Failed to ensure that '$domainGroup' user exists on: '$serverName'!" 
- Write-Output "Failed SQL command was: $sqlCommand" - Write-Output "Error message: $sqlErrorMessage" - exit 1 - } - } - - # Create the data and public schemas - # ---------------------------------- - foreach ($groupSchemaTuple in @(($DataAdminGroup, "data"), ($ResearchUsersGroup, "dbopublic"))) { - $domainGroup, $schemaName = $groupSchemaTuple - $sqlCommand = "IF NOT EXISTS (SELECT * FROM sys.schemas WHERE name = N'$schemaName') EXEC('CREATE SCHEMA $schemaName AUTHORIZATION [$domainGroup]');" - Invoke-SqlCmd -ServerInstance $serverName -Credential $sqlAdminCredentials -QueryTimeout $connectionTimeoutInSeconds -Query $sqlCommand -TrustServerCertificate -ErrorAction SilentlyContinue -ErrorVariable sqlErrorMessage -OutputSqlErrors $true - if ($? -And -Not $sqlErrorMessage) { - Write-Output " [o] Successfully ensured that '$schemaName' schema exists on: '$serverName'" - Start-Sleep -s 10 # allow time for the database action to complete - } else { - Write-Output " [x] Failed to ensure that '$schemaName' schema exists on: '$serverName'!" - Write-Output "Failed SQL command was: $sqlCommand" - Write-Output "Error message: $sqlErrorMessage" - exit 1 - } - } - - - # Give domain groups appropriate roles on the SQL Server - # ------------------------------------------------------ - foreach ($groupRoleTuple in @(($SysAdminGroup, "sysadmin"), ($DataAdminGroup, "dataadmin"), ($ResearchUsersGroup, "researchuser"))) { - $domainGroup, $role = $groupRoleTuple - if ($role -eq "sysadmin") { - # this is a server-level role - $sqlCommand = "ALTER SERVER ROLE [$role] ADD MEMBER [$domainGroup];" - } elseif ($role -eq "dataadmin") { - # this is a schema-level permission set - $sqlCommand = "GRANT CONTROL ON SCHEMA::data TO [$domainGroup];" - } elseif ($role -eq "researchuser") { - # this is a schema-level permission set - $sqlCommand = "ALTER USER [$domainGroup] WITH DEFAULT_SCHEMA=[dbopublic]; USE master; GRANT CONNECT TO [$domainGroup]; GRANT SHOWPLAN TO [$domainGroup]; GRANT SELECT ON SCHEMA::data TO [$domainGroup]; GRANT CREATE TABLE TO [$domainGroup];" - } else { - Write-Output " [x] Role $role not recognised!" - continue - } - Invoke-SqlCmd -ServerInstance $serverName -Credential $sqlAdminCredentials -QueryTimeout $connectionTimeoutInSeconds -Query $sqlCommand -TrustServerCertificate -ErrorAction SilentlyContinue -ErrorVariable sqlErrorMessage -OutputSqlErrors $true - if ($? -And -Not $sqlErrorMessage) { - Write-Output " [o] Successfully gave '$domainGroup' $role permissions on: '$serverName'" - Start-Sleep -s 10 # allow time for the database action to complete - } else { - Write-Output " [x] Failed to give '$domainGroup' $role permissions on: '$serverName'!" - Write-Output "Failed SQL command was: $sqlCommand" - Write-Output "Error message: $sqlErrorMessage" - exit 1 - } - } - - - # Run the scripted SQL Server lockdown - # ------------------------------------ - Write-Output "Running T-SQL lockdown script on: '$serverName'..." - $sqlCommand = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($ServerLockdownCommandB64)) - Invoke-SqlCmd -ServerInstance $serverName -Credential $sqlAdminCredentials -QueryTimeout $connectionTimeoutInSeconds -Query $sqlCommand -TrustServerCertificate -ErrorAction SilentlyContinue -ErrorVariable sqlErrorMessage -OutputSqlErrors $true - if ($? -And -Not $sqlErrorMessage) { - Write-Output " [o] Successfully ran T-SQL lockdown script on: '$serverName'" - } else { - Write-Output " [x] Failed to run T-SQL lockdown script on: '$serverName'!" 
- Write-Output "Failed SQL command was: $sqlCommand" - Write-Output "Error message: $sqlErrorMessage" - exit 1 - } - - - # Removing database access from the local Windows admin - # ----------------------------------------------------- - $windowsAdmin = "${serverName}\${VmAdminUsername}" - Write-Output "Removing database access from $windowsAdmin on: '$serverName'..." - $sqlCommand = "DROP USER IF EXISTS [$windowsAdmin]; IF EXISTS(SELECT * FROM master.dbo.syslogins WHERE loginname = '$windowsAdmin') DROP LOGIN [$windowsAdmin]" - Invoke-SqlCmd -ServerInstance $serverName -Credential $sqlAdminCredentials -QueryTimeout $connectionTimeoutInSeconds -Query $sqlCommand -TrustServerCertificate -ErrorAction SilentlyContinue -ErrorVariable sqlErrorMessage -OutputSqlErrors $true - if ($? -And -Not $sqlErrorMessage) { - Write-Output " [o] Successfully removed database access for $windowsAdmin on: '$serverName'" - Start-Sleep -s 10 # allow time for the database action to complete - } else { - Write-Output " [x] Failed to remove database access for $windowsAdmin on: '$serverName'!" - Write-Output "Failed SQL command was: $sqlCommand" - Write-Output "Error message: $sqlErrorMessage" - exit 1 - } - - - # Revoke the sysadmin role from the SQL AuthUpdateUser used to build the SQL Server - # --------------------------------------------------------------------------------- - Write-Output "Revoking sysadmin role from $DbAdminUsername on: '$serverName'..." - $sqlCommand = "ALTER SERVER ROLE sysadmin DROP MEMBER $DbAdminUsername;" - Invoke-SqlCmd -ServerInstance $serverName -Credential $sqlAdminCredentials -QueryTimeout $connectionTimeoutInSeconds -Query $sqlCommand -TrustServerCertificate -ErrorAction SilentlyContinue -ErrorVariable sqlErrorMessage -OutputSqlErrors $true - if ($? -And -Not $sqlErrorMessage) { - Write-Output " [o] Successfully revoked sysadmin role on: '$serverName'" - Start-Sleep -s 10 # allow time for the database action to complete - } else { - Write-Output " [x] Failed to revoke sysadmin role on: '$serverName'!" - Write-Output "Failed SQL command was: $sqlCommand" - Write-Output "Error message: $sqlErrorMessage" - exit 1 - } -} diff --git a/deployment/secure_research_environment/remote/create_databases/scripts/sre-mssql2019-server-lockdown.sql b/deployment/secure_research_environment/remote/create_databases/scripts/sre-mssql2019-server-lockdown.sql deleted file mode 100644 index 82a85efbff..0000000000 --- a/deployment/secure_research_environment/remote/create_databases/scripts/sre-mssql2019-server-lockdown.sql +++ /dev/null @@ -1,107 +0,0 @@ -/* -N.M numberings above each lockdown command reference recommendations in -the "CIS Microsoft SQL Server 2019 Benchmark v1.0.0" available from -https://downloads.cisecurity.org/. 
-*/ -PRINT '2.1 - Ensure Ad Hoc Distributed Queries Server Configuration Option is set to 0' -EXECUTE sp_configure 'show advanced options', 1; -RECONFIGURE; -EXECUTE sp_configure 'Ad Hoc Distributed Queries', 0; -RECONFIGURE; -GO -EXECUTE sp_configure 'show advanced options', 0; -RECONFIGURE; -GO - -PRINT '' -PRINT '2.2 - Ensure CLR Enabled Server Configuration Option is set to 0' -EXECUTE sp_configure 'clr enabled', 0; -RECONFIGURE; -GO - -PRINT '' -PRINT '2.3 - Ensure Cross DB Ownership Chaining Server Configuration Option is set to 0' -EXECUTE sp_configure 'cross db ownership chaining', 0; -RECONFIGURE; -GO - -PRINT '' -PRINT '2.4 - Ensure Database Mail XPs Server Configuration Option is set to 0' -EXECUTE sp_configure 'show advanced options', 1; -RECONFIGURE; -EXECUTE sp_configure 'Database Mail XPs', 0; -RECONFIGURE; -GO -EXECUTE sp_configure 'show advanced options', 0; -RECONFIGURE; -GO - -PRINT '' -PRINT '2.5 - Ensure Ole Automation Procedures Server Configuration Option is set to 0' -EXECUTE sp_configure 'show advanced options', 1; -RECONFIGURE; -EXECUTE sp_configure 'Ole Automation Procedures', 0; -RECONFIGURE; -GO -EXECUTE sp_configure 'show advanced options', 0; -RECONFIGURE; -GO - -PRINT '' -PRINT '2.6 - Ensure Remote Access Server Configuration Option is set to 0' -EXECUTE sp_configure 'show advanced options', 1; -RECONFIGURE; -EXECUTE sp_configure 'remote access', 0; -RECONFIGURE; -GO -EXECUTE sp_configure 'show advanced options', 0; -RECONFIGURE; -GO - -PRINT '' -PRINT '2.7 - Ensure Remote Admin Connections Server Configuration Option is set to 0' -EXECUTE sp_configure 'remote admin connections', 0; -RECONFIGURE; -GO - -PRINT '' -PRINT '2.8 - Ensure Scan For Startup Procs Server Configuration Option is set to 0' -EXECUTE sp_configure 'show advanced options', 1; -RECONFIGURE; -EXECUTE sp_configure 'scan for startup procs', 0; -RECONFIGURE; -GO -EXECUTE sp_configure 'show advanced options', 0; -RECONFIGURE; -GO - -PRINT '' -PRINT '2.9 - Ensure Trustworthy Database Property is set to Off' -ALTER DATABASE master SET TRUSTWORTHY OFF; -GO - -PRINT '' -PRINT '2.12 - Ensure Hide Instance option is set to Yes for Production SQL Server instances' -EXEC master.sys.xp_instance_regwrite @rootkey = N'HKEY_LOCAL_MACHINE', @key = N'SOFTWARE\Microsoft\Microsoft SQL Server\MSSQLServer\SuperSocketNetLib', @value_name = N'HideInstance', @type = N'REG_DWORD', @value = 1; -GO - -PRINT '' -PRINT '5.1 - Ensure Maximum number of error log files is set to greater than or equal to 12' -EXEC master.sys.xp_instance_regwrite N'HKEY_LOCAL_MACHINE', N'Software\Microsoft\MSSQLServer\MSSQLServer', N'NumErrorLogs', REG_DWORD, 14; -GO - -PRINT '' -PRINT '5.2 - Ensure Default Trace Enabled Server Configuration Option is set to 1' -EXECUTE sp_configure 'show advanced options', 1; -RECONFIGURE; -EXECUTE sp_configure 'default trace enabled', 1; -RECONFIGURE; -GO -EXECUTE sp_configure 'show advanced options', 0; -RECONFIGURE; -GO - -PRINT '' -PRINT '5.3 - Ensure Login Auditing is set to failed logins' -EXEC xp_instance_regwrite N'HKEY_LOCAL_MACHINE', N'Software\Microsoft\MSSQLServer\MSSQLServer', N'AuditLevel', REG_DWORD, 2 -GO diff --git a/deployment/secure_research_environment/remote/create_guacamole/scripts/install_ssl_certificate.sh b/deployment/secure_research_environment/remote/create_guacamole/scripts/install_ssl_certificate.sh deleted file mode 100644 index 9de89f6df9..0000000000 --- a/deployment/secure_research_environment/remote/create_guacamole/scripts/install_ssl_certificate.sh +++ /dev/null @@ -1,32 
+0,0 @@ -#! /bin/bash -# This script is designed to be deployed to an Azure Linux VM via -# the Powershell Invoke-AzVMRunCommand, which sets all variables -# passed in its -Parameter argument as environment variables -# It expects the following parameters: -# USER_FRIENDLY_FQDN -# CERT_THUMBPRINT - -# Remove existing certificates -SSL_DIR="/opt/ssl/conf/live/${USER_FRIENDLY_FQDN}" -sudo rm -rf "${SSL_DIR}/*" -sudo mkdir -p "${SSL_DIR}/*" - -# Import the certificates from the VM secret store -sudo cp "/var/lib/waagent/${CERT_THUMBPRINT}.crt" "${SSL_DIR}/cert.pem" -sudo cp "/var/lib/waagent/${CERT_THUMBPRINT}.prv" "${SSL_DIR}/privkey.pem" - -# Download the Let's Encrypt intermediate certificate -LETS_ENCRYPT_CERTIFICATE_PATH=/opt/ssl/lets-encrypt-r3.pem -if [ ! -e "$LETS_ENCRYPT_CERTIFICATE_PATH" ]; then - echo "Downloading Let's Encrypt intermediate certificate..." - wget -O "$LETS_ENCRYPT_CERTIFICATE_PATH" https://letsencrypt.org/certs/lets-encrypt-r3.pem 2>&1 -fi - -# Create a certificate chain from the certificate and intermediate certificate -echo "Creating fullchain certificate..." -cd "${SSL_DIR}" || exit 1 -cat cert.pem "$LETS_ENCRYPT_CERTIFICATE_PATH" > fullchain.pem -ls -alh - -# Force docker services to reload -sudo docker compose -f /opt/guacamole/docker-compose.yaml up --force-recreate -d 2>&1 diff --git a/deployment/secure_research_environment/remote/create_storage/Set_DNS_Zone.ps1 b/deployment/secure_research_environment/remote/create_storage/Set_DNS_Zone.ps1 deleted file mode 100644 index 63c967d3a7..0000000000 --- a/deployment/secure_research_environment/remote/create_storage/Set_DNS_Zone.ps1 +++ /dev/null @@ -1,55 +0,0 @@ -# Don't make parameters mandatory as if there is any issue binding them, the script will prompt for them -# and remote execution will stall waiting for the non-present user to enter the missing parameter on the -# command line. This take up to 90 minutes to timeout, though you can try running resetState.cmd in -# C:\Packages\Plugins\Microsoft.CPlat.Core.RunCommandWindows\1.1.0 on the remote VM to cancel a stalled -# job, but this does not seem to have an immediate effect -# For details, see https://docs.microsoft.com/en-gb/azure/virtual-machines/windows/run-command -param( - [Parameter(HelpMessage = "FQDNs for which to create DNS zones", Mandatory = $false)] - [string]$privateEndpointFqdnsB64, - [Parameter(HelpMessage = "IP address", Mandatory = $false)] - [string]$IpAddress -) - - -# Deserialise Base-64 encoded variables -# ------------------------------------- -$privateEndpointFqdns = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($privateEndpointFqdnsB64)) | ConvertFrom-Json - - -# Deserialise FQDN names and configure the DNS record for each one -# ---------------------------------------------------------------- -foreach ($privateEndpointFqdn in $privateEndpointFqdns) { - # Check whether the zone exists otherwise create it - if (Get-DnsServerZone -Name $privateEndpointFqdn -ErrorAction SilentlyContinue | Where-Object { $_.ZoneType -eq "Primary" }) { - Write-Output "DNS zone $privateEndpointFqdn already exists" - } else { - Write-Output " [ ] Creating DNS primary zone for $privateEndpointFqdn..." - Add-DnsServerPrimaryZone -Name $privateEndpointFqdn -ReplicationScope "Forest" - if ($?) { - Write-Output " [o] Successfully created DNS primary zone for $privateEndpointFqdn" - } else { - Write-Output " [x] Failed to create DNS primary zone for $privateEndpointFqdn!" 
- } - } - - # If the record exists then remove it - if (Get-DnsServerResourceRecord -ZoneName $privateEndpointFqdn -RRType "A" -name "@" -ErrorAction SilentlyContinue) { - Write-Output " [ ] Removing existing DNS record $privateEndpointFqdn..." - Remove-DnsServerResourceRecord -ZoneName $privateEndpointFqdn -RRType "A" -Name "@" -Force -ErrorVariable Failed -ErrorAction SilentlyContinue - if ($? -and -not $Failed) { - Write-Output " [o] Successfully removed DNS record $privateEndpointFqdn" - } else { - Write-Output " [x] Failed to remove DNS record $privateEndpointFqdn!" - } - } - - # Create the record - Write-Output " [ ] Creating DNS record for $privateEndpointFqdn..." - Add-DnsServerResourceRecordA -Name $privateEndpointFqdn -ZoneName $privateEndpointFqdn -IPv4Address $IpAddress - if ($?) { - Write-Output " [o] Successfully created DNS record for $privateEndpointFqdn" - } else { - Write-Output " [x] Failed to create DNS record for $privateEndpointFqdn!" - } -} diff --git a/deployment/secure_research_environment/remote/network_configuration/scripts/Configure_External_DNS_Queries_Remote.ps1 b/deployment/secure_research_environment/remote/network_configuration/scripts/Configure_External_DNS_Queries_Remote.ps1 deleted file mode 100644 index 2ebc6b7d49..0000000000 --- a/deployment/secure_research_environment/remote/network_configuration/scripts/Configure_External_DNS_Queries_Remote.ps1 +++ /dev/null @@ -1,210 +0,0 @@ -# Don't make parameters mandatory as if there is any issue binding them, the script will prompt for them -# and remote execution will stall waiting for the non-present user to enter the missing parameter on the -# command line. This take up to 90 minutes to timeout, though you can try running resetState.cmd in -# C:\Packages\Plugins\Microsoft.CPlat.Core.RunCommandWindows\1.1.0 on the remote VM to cancel a stalled -# job, but this does not seem to have an immediate effect -# For details, see https://docs.microsoft.com/en-gb/azure/virtual-machines/windows/run-command -param( - [Parameter(HelpMessage = "Comma separated list of FQDNs that are always allowed.")] - [string]$AllowedFqdnsCommaSeparatedList, - [Parameter(HelpMessage = "Comma separated list of CIDR ranges to block external DNS resolution for.")] - [string]$RestrictedCidrsCommaSeparatedList, - [Parameter(HelpMessage = "SRE ID")] - [string]$SreId, - [Parameter(Mandatory = $false, HelpMessage = "Comma separated list of CIDR ranges to allow default DNS resolution rules for.")] - [string]$UnrestrictedCidrsCommaSeparatedList = $null -) - - -# Generate DNS client subnet name from CIDR -# ----------------------------------------- -function Get-DnsClientSubnetNameFromCidr { - param( - [Parameter(HelpMessage = "SRE prefix")] - [string]$SrePrefix, - [Parameter(HelpMessage = "CIDR")] - [string]$Cidr - ) - return "$SrePrefix-$($Cidr.Replace('/','_'))" -} - - -# Ensure that a DNS client subnet exists -# -------------------------------------- -function Set-DnsClientSubnets { - param( - [Parameter(HelpMessage = "CIDR")] - [string]$Cidr, - [Parameter(HelpMessage = "Subnet name")] - [string]$SubnetName - ) - $subnet = Get-DnsServerClientSubnet -Name $SubnetName -ErrorAction SilentlyContinue - if ($subnet) { - Write-Output " [o] DNS client subnet '$SubnetName' for CIDR '$Cidr' already exists." 
- } else { - try { - $subnet = Add-DnsServerClientSubnet -Name $SubnetName -IPv4Subnet $Cidr - Write-Output " [o] Successfully created DNS client subnet '$SubnetName' for CIDR '$Cidr'" - } catch { - Write-Output " [x] Failed to create DNS client subnet '$SubnetName' for CIDR '$Cidr'" - Write-Output $_.Exception - } - } -} - - -# Ensure that a DNS server resolution policy exists -# ------------------------------------------------- -function Set-DnsQueryResolutionPolicy { - param( - [Parameter(HelpMessage = "Comma-separated list of allowed FQDNs")] - [string]$AllowedFqdns, - [Parameter(HelpMessage = "Recursion policy")] - [bool]$RestrictRecursion, - [Parameter(HelpMessage = "Subnet name")] - [string]$SubnetName - ) - # If we are restricting access than attach to the scope with recursion disabled - if ($RestrictRecursion) { - $recursionType = "RecursionRestricted" - $recursionScopeName = "RecursionBlocked" - # Ensure blocked recursion scope exists - $recursionScope = (Get-DnsServerRecursionScope | Where-Object { $_.Name -eq $recursionScopeName }) - if (-not $recursionScope) { - Add-DnsServerRecursionScope -Name $recursionScopeName -EnableRecursion $false - } else { - Set-DnsServerRecursionScope -Name $recursionScopeName -EnableRecursion $false - } - } else { - $recursionType = "RecursionAllowed" - $recursionScopeName = "." - } - try { - # Always allow recursion for approved FQDNs - $null = Add-DnsServerQueryResolutionPolicy -Name "${subnetName}-ApprovedFqdns-DnsRecursionAllowed" -Action ALLOW -ClientSubnet "EQ,$SubnetName" -Condition "AND" -FQDN "EQ,$AllowedFqdns" -ApplyOnRecursion -RecursionScope "." - Write-Output " [o] Set DNS 'RecursionAllowed' for approved FQDNs on client subnet '$SubnetName'" - # For non-approved FQDNs allow or forbid based on the '$RestrictRecursion' argumenet - $null = Add-DnsServerQueryResolutionPolicy -Name "${subnetName}-OtherFqdns-Dns${recursionType}" -Action ALLOW -ClientSubnet "EQ,$SubnetName" -Condition "AND" -FQDN "NE,$AllowedFqdns" -ApplyOnRecursion -RecursionScope $recursionScopeName - Write-Output " [o] Set DNS '$recursionType' for other FQDNs on client subnet '$SubnetName'" - } catch { - Write-Output " [x] Failed to apply DNS policies to client subnet '$SubnetName'" - Write-Output $_.Exception - } -} - - -# Set name prefix for DNS client subnets and DNS resolution policies -$srePrefix = "sre-${sreId}" - - -# Create configurations containing CIDR and corresponding name stem -# ----------------------------------------------------------------- -if ($RestrictedCidrsCommaSeparatedList) { - $restrictedSubnets = @($RestrictedCidrsCommaSeparatedList.Split(",") | ForEach-Object { @{ Cidr = $_; Name = Get-DnsClientSubnetNameFromCidr -SrePrefix $srePrefix -Cidr $_ } }) -} else { - $restrictedSubnets = @() -} -if ($UnrestrictedCidrsCommaSeparatedList) { - $unrestrictedSubnets = @($UnrestrictedCidrsCommaSeparatedList.Split(",") | ForEach-Object { @{ Cidr = $_; Name = Get-DnsClientSubnetNameFromCidr -SrePrefix $srePrefix -Cidr $_ } }) -} else { - $unrestrictedSubnets = @() -} - - -# Remove pre-existing DNS query resolution policies for SRE -# --------------------------------------------------------- -Write-Output "`nDeleting pre-existing DNS resolution policies for SRE '$sreId'..." 
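# Subnet and policy names generated above all share the "sre-<sreId>-" prefix, with any '/' in the
# CIDR replaced by '_' (for example, an SRE called 'sandbox' with the illustrative CIDR 10.151.1.0/24
# would get a client subnet named 'sre-sandbox-10.151.1.0_24'). Removing everything that matches the
# prefix before recreating it keeps this script idempotent when re-run with a different set of CIDRs.
# To inspect what currently exists on the DNS server, something like the following can be used:
#   Get-DnsServerClientSubnet | Where-Object { $_.Name -like "$srePrefix-*" }
#   Get-DnsServerQueryResolutionPolicy | Where-Object { $_.Name -like "$srePrefix-*" }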
-$existingPolicies = Get-DnsServerQueryResolutionPolicy | Where-Object { $_.Name -like "$srePrefix-*" } -if ($existingPolicies) { - foreach ($existingPolicy in $existingPolicies) { - try { - Remove-DnsServerQueryResolutionPolicy -Name $existingPolicy.Name -Force - Write-Output " [o] Successfully deleted policy '$($existingPolicy.Name)'" - } catch { - Write-Output " [x] Failed to delete policy '$($existingPolicy.Name)'" - Write-Output $_.Exception - } - } -} else { - Write-Output " [o] No pre-existing DNS resolution policies found." -} - - -# Remove pre-existing DNS client subnets for SRE -# ---------------------------------------------- -Write-Output "`nDeleting pre-existing DNS client subnets for SRE '$sreId'..." -$existingSubnets = Get-DnsServerClientSubnet | Where-Object { $_.Name -like "$srePrefix-*" } -if ($existingSubnets) { - foreach ($existingSubnet in $existingSubnets) { - try { - Remove-DnsServerClientSubnet -Name $existingSubnet.Name -Force - Write-Output " [o] Successfully deleted subnet '$($existingSubnet.Name)'" - } catch { - Write-Output " [x] Failed to delete subnet '$($existingSubnet.Name)'" - Write-Output $_.Exception - } - } -} else { - Write-Output " [o] No pre-existing DNS client subnets found." -} - - -# Ensure DNS client subnets exist for unrestricted CIDR ranges -# ------------------------------------------------------------ -Write-Output "`nCreating DNS client subnets for unrestricted CIDR ranges (these will not be blocked)..." -if ($unrestrictedSubnets) { - $unrestrictedSubnets | ForEach-Object { Set-DnsClientSubnets -Cidr $_.cidr -SubnetName $_.Name } -} else { - Write-Output " [o] No exception CIDR ranges specifed." -} - - -# Ensure DNS client subnets exist for restricted CIDR ranges -# ---------------------------------------------------------- -Write-Output "`nCreating DNS client subnets for restricted CIDR ranges..." -if ($restrictedSubnets) { - $restrictedSubnets | ForEach-Object { Set-DnsClientSubnets -Cidr $_.cidr -SubnetName $_.Name } -} else { - Write-Output " [o] No blocked CIDR ranges specifed." -} - - -# Create DNS resolution policies for exception IP ranges -# ------------------------------------------------------ -# Assign all queries for exception CIDRs subnets to default ('.') recursion scope. -# We must set policies for exception CIDR subnets first to ensure they take precedence as we -# cannot set processing order to be greater than the total number of resolution policies. -Write-Output "`nCreating DNS resolution policies for unrestricted CIDR ranges (these will not be blocked)..." -if ($unrestrictedSubnets) { - $unrestrictedSubnets | ForEach-Object { Set-DnsQueryResolutionPolicy -SubnetName $_.Name -RestrictRecursion $false -AllowedFqdns $AllowedFqdnsCommaSeparatedList } -} else { - Write-Output " [o] No unrestricted CIDR ranges specifed." -} - - -# Create DNS resolution policies for restricted IP ranges -# ---------------------------------------------------- -# Assign all queries for restricted CIDRs subnets to restricted recursion scope. -Write-Output "`nCreating DNS resolution policies for restricted CIDR ranges..." -if ($restrictedSubnets) { - $restrictedSubnets | ForEach-Object { Set-DnsQueryResolutionPolicy -SubnetName $_.Name -RestrictRecursion $true -AllowedFqdns $AllowedFqdnsCommaSeparatedList } -} else { - Write-Output " [o] No restricted CIDR ranges specifed." 
-} - - -# Clear DNS cache to avoid midleading tests -# ----------------------------------------- -# If a domain has previously been queried and is in the cache, it will be -# returned without recursion to external DNS servers -Write-Output "`nDisabling DNS cache..." -try { - Set-ItemProperty HKLM:\System\CurrentControlSet\Services\DNS\Parameters MaxCacheTtl 0x0 -Type DWord -ErrorAction Stop - Restart-Service -Name DNS -ErrorAction Stop - Start-Sleep -Seconds 30 - Clear-DnsServerCache -Force -ErrorAction Stop - Write-Output " [o] Successfully disabled DNS cache." -} catch { - Write-Output " [x] Failed to disable DNS cache." - Write-Output $_.Exception -} diff --git a/deployment/secure_research_environment/remote/network_configuration/scripts/test_external_dns_resolution_fails.sh b/deployment/secure_research_environment/remote/network_configuration/scripts/test_external_dns_resolution_fails.sh deleted file mode 100644 index fd3227cd2c..0000000000 --- a/deployment/secure_research_environment/remote/network_configuration/scripts/test_external_dns_resolution_fails.sh +++ /dev/null @@ -1,104 +0,0 @@ -#!/bin/bash -# Test DNS resolution lockdown -# This script is designed to be deployed to an Azure Linux VM via -# the Powershell Invoke-AzVMRunCommand, which sets all variables -# passed in its -Parameter argument as environment variables -# -# The following variables are expected by the script -# SHM_DOMAIN_FQDN -# SHM_DC1_FQDN -# SHM_DC2_FQDN -# OUTBOUND_INTERNET - -confirm_dns_lookup_fails() { - FQDN=$1 - DNS_SERVER=$2 - if [ -z "$DNS_SERVER" ]; then - DNS_SERVER_MSG="default DNS servers" - else - DNS_SERVER_MSG="$DNS_SERVER" - fi - sudo systemd-resolve --flush-caches - NS_RESULT=$(nslookup $FQDN $DNS_SERVER) - if [ $? -eq 0 ]; then - echo -e " [x] DNS lookup for '$FQDN' unexpectedly succeeded via $DNS_SERVER_MSG." - echo -e "$NS_RESULT\n" - return 1 - else - echo -e " [o] DNS lookup for '$FQDN' failed as expected via $DNS_SERVER_MSG." - return 0 - fi -} - -confirm_dns_lookup_succeeds() { - FQDN=$1 - DNS_SERVER=$2 - if [ -z "$DNS_SERVER" ]; then - DNS_SERVER_MSG="default DNS servers" - else - DNS_SERVER_MSG="$DNS_SERVER" - fi - sudo systemd-resolve --flush-caches - NS_RESULT=$(nslookup $FQDN $DNS_SERVER) - if [ $? -eq 0 ]; then - echo -e " [o] DNS lookup for '$FQDN' succeeded as expected via $DNS_SERVER_MSG." - return 0 - else - echo -e " [x] DNS lookup for '$FQDN' unexpectedly failed via $DNS_SERVER_MSG." - echo -e "$NS_RESULT\n" - return 1 - fi -} - -echo "Testing DNS resolution lockdown" -echo "-------------------------------" -echo "$HOSTNAME $(date -u --iso-8601=seconds)" - -FAILED_TESTS=0 -echo -e "\nTesting DNS lookup for internal FQDNs via default DNS servers..." -confirm_dns_lookup_succeeds "$SHM_DOMAIN_FQDN" || FAILED_TESTS=$(($FAILED_TESTS + 1)) -confirm_dns_lookup_succeeds "$SHM_DC1_FQDN" || FAILED_TESTS=$(($FAILED_TESTS + 1)) -confirm_dns_lookup_succeeds "$SHM_DC2_FQDN" || FAILED_TESTS=$(($FAILED_TESTS + 1)) - -echo -e "\nTesting DNS lookup for non-existent external domains via default DNS servers..." -confirm_dns_lookup_fails "fail.example.com" || FAILED_TESTS=$(($FAILED_TESTS + 1)) - -echo -e "\nTesting DNS lookup for resolvable external domains via default DNS servers..." 
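# The expectations below depend on the OUTBOUND_INTERNET value passed in via Invoke-AzVMRunCommand
# (see the header above): when it is "Deny", lookups for ordinary external domains should fail because
# recursion is blocked for the restricted subnets; when it is "Allow", the same lookups should succeed.
# Domains on the allowlist, such as time.google.com (checked last), are expected to resolve either way.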
-if [ "$OUTBOUND_INTERNET" == "Deny" ]; then - confirm_dns_lookup_fails "example.com" || FAILED_TESTS=$(($FAILED_TESTS + 1)) - confirm_dns_lookup_fails "doi.org" || FAILED_TESTS=$(($FAILED_TESTS + 1)) - confirm_dns_lookup_fails "google.com" || FAILED_TESTS=$(($FAILED_TESTS + 1)) - confirm_dns_lookup_fails "facebook.com" || FAILED_TESTS=$(($FAILED_TESTS + 1)) -elif [ "$OUTBOUND_INTERNET" == "Allow" ]; then - confirm_dns_lookup_succeeds "example.com" || FAILED_TESTS=$(($FAILED_TESTS + 1)) - confirm_dns_lookup_succeeds "doi.org" || FAILED_TESTS=$(($FAILED_TESTS + 1)) - confirm_dns_lookup_succeeds "google.com" || FAILED_TESTS=$(($FAILED_TESTS + 1)) - confirm_dns_lookup_succeeds "facebook.com" || FAILED_TESTS=$(($FAILED_TESTS + 1)) -fi -# Check a domain on the allowlist which should always work -confirm_dns_lookup_succeeds "time.google.com" || FAILED_TESTS=$(($FAILED_TESTS + 1)) - -echo -e "\nTesting DNS lookup for non-existent external domains via Azure Platform DNS servers..." -confirm_dns_lookup_fails "fail.example.com" "168.63.129.16" || FAILED_TESTS=$(($FAILED_TESTS + 1)) - -echo -e "\nTesting DNS lookup for resolvable external domains via Azure Platform DNS servers..." -if [ "$OUTBOUND_INTERNET" == "Deny" ]; then - # All domains should fail as the Azure DNS server is not directly contactable - confirm_dns_lookup_fails "example.com" "168.63.129.16" || FAILED_TESTS=$(($FAILED_TESTS + 1)) - confirm_dns_lookup_fails "doi.org" "168.63.129.16" || FAILED_TESTS=$(($FAILED_TESTS + 1)) - confirm_dns_lookup_fails "google.com" "168.63.129.16" || FAILED_TESTS=$(($FAILED_TESTS + 1)) - confirm_dns_lookup_fails "facebook.com" "168.63.129.16" || FAILED_TESTS=$(($FAILED_TESTS + 1)) - confirm_dns_lookup_fails "time.google.com" "168.63.129.16" || FAILED_TESTS=$(($FAILED_TESTS + 1)) -elif [ "$OUTBOUND_INTERNET" == "Allow" ]; then - confirm_dns_lookup_succeeds "example.com" "168.63.129.16" || FAILED_TESTS=$(($FAILED_TESTS + 1)) - confirm_dns_lookup_succeeds "doi.org" "168.63.129.16" || FAILED_TESTS=$(($FAILED_TESTS + 1)) - confirm_dns_lookup_succeeds "google.com" "168.63.129.16" || FAILED_TESTS=$(($FAILED_TESTS + 1)) - confirm_dns_lookup_succeeds "facebook.com" "168.63.129.16" || FAILED_TESTS=$(($FAILED_TESTS + 1)) - confirm_dns_lookup_succeeds "time.google.com" "168.63.129.16" || FAILED_TESTS=$(($FAILED_TESTS + 1)) -fi - -if [ $FAILED_TESTS -eq 0 ]; then - echo -e "\n *** SUCCESS: All tests passed! ***" -else - echo -e "\n *** ERROR: $FAILED_TESTS test(s) failed! 
***" -fi diff --git a/deployment/secure_research_environment/remote/network_configuration/scripts/update_mirror_settings.sh b/deployment/secure_research_environment/remote/network_configuration/scripts/update_mirror_settings.sh deleted file mode 100644 index 16cee00d65..0000000000 --- a/deployment/secure_research_environment/remote/network_configuration/scripts/update_mirror_settings.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash -# Update PyPI and CRAN repository settings -# This script is designed to be deployed to an Azure Linux VM via -# the Powershell Invoke-AzVMRunCommand, which sets all variables -# passed in its -Parameter argument as environment variables -# -# The following variables are expected by the script -# CRAN_MIRROR_INDEX_URL -# PYPI_MIRROR_IP -# PYPI_MIRROR_HOST - -# Update PyPI -#------------ -echo "Updating PyPI mirror to point at '$PYPI_MIRROR_INDEX_URL'" -echo "" > /etc/pip.conf -echo "[global]" >> /etc/pip.conf -echo "index = ${PYPI_MIRROR_INDEX}" >> /etc/pip.conf -echo "index-url = ${PYPI_MIRROR_INDEX_URL}" >> /etc/pip.conf -echo "trusted-host = ${PYPI_MIRROR_HOST}" >> /etc/pip.conf - - -# Update CRAN -#------------ -echo "Updating CRAN mirror to point at '$CRAN_MIRROR_INDEX_URL'" -echo "" > /etc/R/Rprofile.site -echo "local({" >> /etc/R/Rprofile.site -echo " r <- getOption(\"repos\")" >> /etc/R/Rprofile.site -echo " r[\"CRAN\"] <- \"${CRAN_MIRROR_INDEX_URL}\"" >> /etc/R/Rprofile.site -echo " options(repos = r)" >> /etc/R/Rprofile.site -echo "})" >> /etc/R/Rprofile.site diff --git a/deployment/secure_research_environment/remote/secure_research_desktop/scripts/check_drive_mounts.sh b/deployment/secure_research_environment/remote/secure_research_desktop/scripts/check_drive_mounts.sh deleted file mode 100644 index eb1a7b606c..0000000000 --- a/deployment/secure_research_environment/remote/secure_research_desktop/scripts/check_drive_mounts.sh +++ /dev/null @@ -1,37 +0,0 @@ -#! /bin/bash -# This script is designed to be deployed to an Azure Linux VM via -# the Powershell Invoke-AzVMRunCommand, which sets all variables -# passed in its -Parameter argument as environment variables - -RED="\033[0;31m" -BLUE="\033[0;36m" -END="\033[0m" - -MOUNT_POINTS=("/data" "/home" "/scratch" "/shared" "/output") -echo -e "${BLUE}Checking drives are mounted...${END}" -for MOUNT_POINT in "${MOUNT_POINTS[@]}"; do - ls "${MOUNT_POINT}"/* > /dev/null 2>&1 - if (findmnt "$MOUNT_POINT" > /dev/null 2>&1); then - echo -e "${BLUE} [o] ${MOUNT_POINT} is mounted...${END}" - else - echo -e "${RED} [ ] ${MOUNT_POINT} not mounted. 
Attempting to mount...${END}" - MOUNT_UNIT="$(echo "$MOUNT_POINT" | tr -d '/').mount" - if [ -e "/etc/systemd/system/${MOUNT_UNIT}" ]; then - systemctl start "${MOUNT_UNIT}" - else - mount "$MOUNT_POINT" - fi - fi -done -sleep 30 - -echo -e "${BLUE}Rechecking drives are mounted...${END}" -for MOUNT_POINT in "${MOUNT_POINTS[@]}"; do - ls "${MOUNT_POINT}"/* > /dev/null 2>&1 - if (findmnt "$MOUNT_POINT" > /dev/null 2>&1); then - echo -e "${BLUE} [o] ${MOUNT_POINT} is mounted...${END}" - df -h | grep "$MOUNT_POINT" - else - echo -e "${RED} [x] ${MOUNT_POINT} is not currently mounted...${END}" - fi -done diff --git a/deployment/secure_research_environment/remote/secure_research_desktop/scripts/check_ldap_connection.sh b/deployment/secure_research_environment/remote/secure_research_desktop/scripts/check_ldap_connection.sh deleted file mode 100644 index 18d23cf3e3..0000000000 --- a/deployment/secure_research_environment/remote/secure_research_desktop/scripts/check_ldap_connection.sh +++ /dev/null @@ -1,33 +0,0 @@ -#! /bin/bash -# This script is designed to be deployed to an Azure Linux VM via -# the Powershell Invoke-AzVMRunCommand, which sets all variables -# passed in its -Parameter argument as environment variables -# It expects the following parameters: -# DOMAIN_CONTROLLER -# LDAP_SEARCH_USER -# LDAP_TEST_USER -# SERVICE_PATH - -RED="\033[0;31m" -BLUE="\033[0;36m" -END="\033[0m" - -echo -e "${BLUE}Checking LDAP connectivity${END}" - -LDAP_SECRET=$(sudo cat /etc/ldap.secret) -LDAPSEARCH_CMD="ldapsearch -LLL -D \"${LDAP_SEARCH_USER}@${DOMAIN_LOWER}\" -w \"$LDAP_SECRET\" -p 389 -h \"$DOMAIN_CONTROLLER\" -b \"$SERVICE_PATH\" -s sub \"(sAMAccountName=${LDAP_TEST_USER})\"" - -echo -e "Testing LDAP search..." -LDAP_SEARCH_OUTPUT=$(eval ${LDAPSEARCH_CMD} 2>&1) # NB. eval is OK here since we control the inputs -STATUS=$(echo "${LDAP_SEARCH_OUTPUT}" | grep 'sAMAccountName:' | cut -d' ' -f2) -if [ "$STATUS" == "$LDAP_TEST_USER" ]; then - echo -e "${BLUE} [o] LDAP search succeeded: found user '$STATUS'.${END}" - echo "LDAP SEARCH RESULT:" - echo "$LDAP_SEARCH_OUTPUT" - exit 0 -else - echo -e "${RED} [x] LDAP search failed.${END}" - echo "LDAP SEARCH RESULT:" - echo "$LDAP_SEARCH_OUTPUT" - exit 1 -fi \ No newline at end of file diff --git a/deployment/secure_research_environment/remote/secure_research_desktop/scripts/rerun_realm_join.sh b/deployment/secure_research_environment/remote/secure_research_desktop/scripts/rerun_realm_join.sh deleted file mode 100644 index ef4dfb83d6..0000000000 --- a/deployment/secure_research_environment/remote/secure_research_desktop/scripts/rerun_realm_join.sh +++ /dev/null @@ -1,44 +0,0 @@ -#! /bin/bash -# This script is designed to be deployed to an Azure Linux VM via -# the Powershell Invoke-AzVMRunCommand, which sets all variables -# passed in its -Parameter argument as environment variables -# It expects the following parameters: -# DOMAIN_JOIN_USER -# DOMAIN_LOWER -# DOMAIN_JOIN_OU - -RED="\033[0;31m" -BLUE="\033[0;36m" -END="\033[0m" - -echo -e "${BLUE}Checking realm membership${END}" -REALM_LIST_CMD="sudo realm list" -STATUS_CMD="sudo realm list --name-only | grep $DOMAIN_LOWER" -REJOIN_CMD="sudo cat /etc/domain-join.secret | sudo realm join --verbose --computer-ou='${DOMAIN_JOIN_OU}' -U ${DOMAIN_JOIN_USER} ${DOMAIN_FQDN_LOWER} --install=/ 2>&1" - -echo -e "Testing current realms..." -STATUS=$(${STATUS_CMD}) -if [ "$STATUS" == "" ]; then - echo -e "${RED}No realm memberships found. 
Attempting to join $DOMAIN_LOWER${END}" - eval $REJOIN_CMD - sleep 30 # allow time for the realm join to propagate -else - echo -e "${BLUE} [o] Currently a member of realm: '$STATUS'. No need to rejoin.${END}" - echo "REALM LIST RESULT:" - eval $REALM_LIST_CMD - exit 0 -fi - -echo -e "Retesting current realms..." -STATUS=$(${STATUS_CMD}) -if [ "$STATUS" == "" ]; then - echo -e "${RED} [x] No realm memberships found!${END}" - echo "REALM LIST RESULT:" - eval $REALM_LIST_CMD - exit 1 -else - echo -e "${BLUE} [o] Currently a member of realm: '$STATUS'${END}" - echo "REALM LIST RESULT:" - eval $REALM_LIST_CMD - exit 0 -fi \ No newline at end of file diff --git a/deployment/secure_research_environment/remote/secure_research_desktop/scripts/restart_name_resolution_service.sh b/deployment/secure_research_environment/remote/secure_research_desktop/scripts/restart_name_resolution_service.sh deleted file mode 100644 index e8a0727604..0000000000 --- a/deployment/secure_research_environment/remote/secure_research_desktop/scripts/restart_name_resolution_service.sh +++ /dev/null @@ -1,104 +0,0 @@ -#! /bin/bash -# This script is designed to be deployed to an Azure Linux VM via -# the Powershell Invoke-AzVMRunCommand, which sets all variables -# passed in its -Parameter argument as environment variables -# It expects the following parameters: -# DOMAIN_CONTROLLER -# DOMAIN_LOWER - -RED="\033[0;31m" -BLUE="\033[0;36m" -END="\033[0m" - -SYSTEMD_RESOLV_CONF="/run/systemd/resolve/resolv.conf" - -# Test nslookup -test_dnslookup () { - local NS_RESULT - NS_RESULT="$(nslookup "$DOMAIN_CONTROLLER")" - local NS_EXIT=$? - - echo "NS LOOKUP RESULT:" - echo "$NS_RESULT" - - if [ $NS_EXIT -eq 0 ]; then - echo -e "${BLUE}Name resolution working.${END}" - else - echo -e "${RED}Name resolution not working. 
Testing with systemd${END}" - systemd-resolve "$DOMAIN_CONTROLLER" - fi - return $NS_EXIT -} - -# Test the /etc/resolv.conf file -test_resolve_conf() { - local RESOLVE_CONF_LOCATION - RESOLVE_CONF_LOCATION=$(realpath /etc/resolv.conf) - sed -e 's|^|/etc/resolv.conf: |g' /etc/resolv.conf - if [ "${RESOLVE_CONF_LOCATION}" != "${SYSTEMD_RESOLV_CONF}" ]; then - echo -e "${RED}/etc/resolv.conf is currently pointing to ${RESOLVE_CONF_LOCATION}${END}" - return 1 - else - echo -e "${BLUE}/etc/resolv.conf is currently pointing to ${RESOLVE_CONF_LOCATION}${END}" - fi - return 0 -} - -# Update systemd resolv.conf -update_systemd_resolv_conf () { - cp ${SYSTEMD_RESOLV_CONF} /tmp/resolved.conf - sudo sed -i -e "s/^[#]DNS=.*/DNS=/" -e "s/^[#]FallbackDNS=.*/FallbackDNS=/" -e "s/^[#]Domains=.*/Domains=${DOMAIN_FQDN_LOWER}/" ${SYSTEMD_RESOLV_CONF} - if (cmp ${SYSTEMD_RESOLV_CONF} /tmp/resolved.conf); then - echo "No updates needed" - else - echo "Previous ${SYSTEMD_RESOLV_CONF}:" - grep -v -e '^[[:space:]]*$' /tmp/resolved.conf | grep -v "^#" - echo "Updated ${SYSTEMD_RESOLV_CONF}:" - grep -v -e '^[[:space:]]*$' ${SYSTEMD_RESOLV_CONF} | grep -v "^#" - restart_resolved - fi -} - -# Restart the systemd-resolved service -restart_resolved () { - echo "Restarting systemd-resolved name service" - sudo systemctl restart systemd-resolved -} - -# Reset /etc/resolv.conf -reset_resolv_conf () { - echo -e "${BLUE}Resetting /etc/resolv.conf symlink${END}" - sudo rm /etc/resolv.conf - sudo ln -rsf "${SYSTEMD_RESOLV_CONF}" /etc/resolv.conf - test_resolve_conf -} - - -# Run name resolution checks -# -------------------------- -echo -e "${BLUE}Checking name resolution${END}" - -# Check nslookup -echo -e "\nTesting connectivity for '$DOMAIN_CONTROLLER'" -test_dnslookup -DNS_STATUS=$? - -# Update ${SYSTEMD_RESOLV_CONF} if necessary -echo -e "\nTesting ${SYSTEMD_RESOLV_CONF}" -update_systemd_resolv_conf - -# Check where resolv.conf is pointing -echo -e "\nTesting /etc/resolv.conf" -test_resolve_conf -RESOLVE_CONF_STATUS=$? -if [ "$RESOLVE_CONF_STATUS" != "0" ]; then - reset_resolv_conf - test_dnslookup - DNS_STATUS=$? -fi - -# If the DNS problem is not solved then restart the service -if [ "$DNS_STATUS" != "0" ]; then - restart_resolved - test_dnslookup -fi \ No newline at end of file diff --git a/deployment/secure_research_environment/remote/secure_research_desktop/scripts/restart_sssd_service.sh b/deployment/secure_research_environment/remote/secure_research_desktop/scripts/restart_sssd_service.sh deleted file mode 100644 index 16e4e4bebe..0000000000 --- a/deployment/secure_research_environment/remote/secure_research_desktop/scripts/restart_sssd_service.sh +++ /dev/null @@ -1,44 +0,0 @@ -#! /bin/bash -# This script is designed to be deployed to an Azure Linux VM via -# the Powershell Invoke-AzVMRunCommand, which sets all variables -# passed in its -Parameter argument as environment variables - -RED="\033[0;31m" -BLUE="\033[0;36m" -END="\033[0m" - -echo -e "${BLUE}Checking SSSD status${END}" -STATUS_CMD="sudo systemctl status sssd" - -echo "Testing sssd status..." -STATUS=$(${STATUS_CMD}) -if [ "$(echo $STATUS | grep 'Active: failed')" != "" ]; then - echo -e "${RED}SSSD service has failed. 
Restarting...${END}" - sudo systemctl stop sssd - # Update sssd settings - sudo sed -i -E 's|(use_fully_qualified_names = ).*|\1False|' /etc/sssd/sssd.conf - sudo sed -i -E 's|(fallback_homedir = ).*|\1/home/%u|' /etc/sssd/sssd.conf - sudo sed -i -E 's|(access_provider = ).*|\1simple|' /etc/sssd/sssd.conf - # Force re-generation of config files - sudo rm /var/lib/sss/db/*.ldb - sudo systemctl restart sssd -else - echo -e "${BLUE} [o] SSSD service is working. No need to restart.${END}" - echo "SSSD STATUS RESULT:" - echo "$STATUS" - exit 0 -fi - -echo "Retesting sssd status..." -STATUS=$(${STATUS_CMD}) -if [ "$(echo $STATUS | grep 'Active: failed')" != "" ]; then - echo -e "${RED} [x] SSSD service not working after restart.${END}" - echo "SSSD STATUS RESULT:" - echo "$STATUS" - exit 1 -else - echo -e "${BLUE} [o] SSSD service is working after restart.${END}" - echo "SSSD STATUS RESULT:" - echo "$STATUS" - exit 0 -fi \ No newline at end of file diff --git a/deployment/secure_research_environment/remote/secure_research_desktop/scripts/restart_xrdp_service.sh b/deployment/secure_research_environment/remote/secure_research_desktop/scripts/restart_xrdp_service.sh deleted file mode 100644 index 1ce44f1279..0000000000 --- a/deployment/secure_research_environment/remote/secure_research_desktop/scripts/restart_xrdp_service.sh +++ /dev/null @@ -1,45 +0,0 @@ -#! /bin/bash -# This script is designed to be deployed to an Azure Linux VM via -# the Powershell Invoke-AzVMRunCommand, which sets all variables -# passed in its -Parameter argument as environment variables - -RED="\033[0;31m" -BLUE="\033[0;36m" -END="\033[0m" - -echo -e "${BLUE}Checking xrdp status${END}" -XRDP_CMD="systemctl status xrdp" -XRDP_SESMAN_CMD="systemctl status xrdp-sesman" - - -echo "Testing xrdp status..." -XRDP_STATUS=$(sudo ${XRDP_CMD} | grep 'Active: failed') -XRDP_SESMAN_STATUS=$(sudo ${XRDP_SESMAN_CMD} | grep 'Active: failed') -if [[ ("$XRDP_STATUS" != "") || ("$XRDP_SESMAN_STATUS" != "") ]]; then - echo -e "${RED}xrdp services have failed. Restarting...${END}" - sudo systemctl restart xrdp - sudo systemctl restart xrdp-sesman -else - echo -e "${BLUE} [o] xrdp services are working. No need to restart.${END}" - echo "XRDP STATUS RESULT:" - sudo ${XRDP_CMD} - sudo ${XRDP_SESMAN_CMD} - exit 0 -fi - -echo "Retesting xrdp status..." -XRDP_STATUS=$(sudo ${XRDP_CMD} | grep 'Active: failed') -XRDP_SESMAN_STATUS=$(sudo ${XRDP_SESMAN_CMD} | grep 'Active: failed') -if [[ ("$XRDP_STATUS" != "") || ("$XRDP_SESMAN_STATUS" != "") ]]; then - echo -e "${RED} [x] xrdp services are not working after restart.${END}" - echo "XRDP STATUS RESULT:" - sudo ${XRDP_CMD} - sudo ${XRDP_SESMAN_CMD} - exit 1 -else - echo -e "${BLUE} [o] xrdp services are working after restart.${END}" - echo "XRDP STATUS RESULT:" - sudo ${XRDP_CMD} - sudo ${XRDP_SESMAN_CMD} - exit 0 -fi diff --git a/deployment/secure_research_environment/remote/secure_research_desktop/scripts/set_smoke_test_permissions.sh b/deployment/secure_research_environment/remote/secure_research_desktop/scripts/set_smoke_test_permissions.sh deleted file mode 100644 index c96f9c8df8..0000000000 --- a/deployment/secure_research_environment/remote/secure_research_desktop/scripts/set_smoke_test_permissions.sh +++ /dev/null @@ -1,22 +0,0 @@ -#! 
/bin/bash -# This script is designed to be deployed to an Azure Linux VM via -# the Powershell Invoke-AzVMRunCommand, which sets all variables -# passed in its -Parameter argument as environment variables - -# Put the tests into the correct filesystem location -mv /opt/tests/smoke_tests/* /opt/tests -rmdir /opt/tests/smoke_tests/ - -# Update file permissions -chmod -R 644 /opt/tests/ -chmod ugo+x /opt/tests/ /opt/tests/tests/ /opt/tests/package_lists/ -chmod ugo+rx /opt/tests/tests/*.{jl,py,sh,R} - -# If packages lists were uploaded during the the build then we should use those -if [ -n "$(ls -A /opt/build/packages 2>/dev/null)" ]; then - rm -rf /opt/tests/package_lists/* - ln -s /opt/build/packages/* /opt/tests/package_lists/ -fi - -# Show final outputs -ls -alh /opt/tests/* diff --git a/deployment/secure_research_environment/setup/Add_Single_SRD.ps1 b/deployment/secure_research_environment/setup/Add_Single_SRD.ps1 deleted file mode 100644 index b9f5072ef4..0000000000 --- a/deployment/secure_research_environment/setup/Add_Single_SRD.ps1 +++ /dev/null @@ -1,300 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $true, HelpMessage = "Enter SRE ID (e.g. 'sandbox')")] - [string]$sreId, - [Parameter(Mandatory = $true, HelpMessage = "Last octet of IP address (eg. '160')")] - [string]$ipLastOctet, - [Parameter(Mandatory = $false, HelpMessage = "Enter VM size to use (or leave empty to use default)")] - [string]$vmSize = "default", - [Parameter(Mandatory = $false, HelpMessage = "Perform an in-place upgrade.")] - [switch]$Upgrade, - [Parameter(Mandatory = $false, HelpMessage = "Force an in-place upgrade.")] - [switch]$Force -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.Compute -ErrorAction Stop -Import-Module Az.Network -ErrorAction Stop -Import-Module Az.Resources -ErrorAction Stop -Import-Module Powershell-Yaml -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureCompute -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureKeyVault -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureNetwork -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureResources -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureStorage -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Cryptography -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/DataStructures -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/RemoteCommands -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Templates -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-SreConfig -shmId $shmId -sreId $sreId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - - -# Set VM name and size -# We need to define a unique hostname of no more than 15 characters -# ----------------------------------------------------------------- -if ($vmSize -eq "default") { $vmSize = $config.sre.srd.vmSizeDefault } -$vmHostname = "SRE-$($config.sre.id)-${ipLastOctet}".ToUpper() -$vmNamePrefix = "${vmHostname}-SRD".ToUpper() -$vmName = 
"$vmNamePrefix-$($config.sre.srd.vmImage.version)".Replace(".", "-") - - -# Create SRD resource group if it does not exist -# ---------------------------------------------- -$null = Deploy-ResourceGroup -Name $config.sre.srd.rg -Location $config.sre.location - - -# Retrieve VNET and subnets -# ------------------------- -Add-LogMessage -Level Info "Retrieving virtual network '$($config.sre.network.vnet.name)'..." -$vnet = Get-AzVirtualNetwork -Name $config.sre.network.vnet.name -ResourceGroupName $config.sre.network.vnet.rg -ErrorAction Stop -$computeSubnet = Get-Subnet -Name $config.sre.network.vnet.subnets.compute.name -VirtualNetworkName $vnet.Name -ResourceGroupName $config.sre.network.vnet.rg -$deploymentSubnet = Get-Subnet -Name $config.sre.network.vnet.subnets.deployment.name -VirtualNetworkName $vnet.Name -ResourceGroupName $config.sre.network.vnet.rg - - -# Get deployment and final IP addresses -# ------------------------------------- -$deploymentIpAddress = Get-NextAvailableIpInRange -IpRangeCidr $config.sre.network.vnet.subnets.deployment.cidr -VirtualNetwork $vnet -$finalIpAddress = Get-NextAvailableIpInRange -IpRangeCidr $config.sre.network.vnet.subnets.compute.cidr -Offset $ipLastOctet - - -# Check whether this IP address has been used. -# -------------------------------------------- -$existingNic = Get-AzNetworkInterface -ResourceGroupName $config.sre.srd.rg | Where-Object { $_.IpConfigurations.PrivateIpAddress -eq $finalIpAddress } -if (($existingNic.VirtualMachine.Id) -and -not $Upgrade) { - Add-LogMessage -Level InfoSuccess "A VM already exists with IP address '$finalIpAddress'. Use -Upgrade if you want to overwrite this." - $null = Set-AzContext -Context $originalContext -ErrorAction Stop - exit 0 -} - - -# If we are upgrading then we need an existing VM -# ----------------------------------------------- -if ($Upgrade) { - # Attempt to find exactly one existing virtual machine - $existingVm = Get-AzVM | Where-Object { $_.Name -match "$vmNamePrefix-\d{2}-\d{2}-\d{10}" } - if (-not $existingVm) { - Add-LogMessage -Level Fatal "No existing VM found to upgrade" - } elseif ($existingVm.Length -ne 1) { - $existingVm | ForEach-Object { Add-LogMessage -Level Info "Candidate VM: '$($_.Name)'" } - $null = Set-AzContext -Context $originalContext -ErrorAction Stop - Add-LogMessage -Level Fatal "Multiple candidate VMs found, aborting upgrade" - } else { - Add-LogMessage -Level Info "Found an existing VM '$($existingVm.Name)'" - } - - # Check whether an upgrade is needed - if (($existingVm.Name -eq $vmName) -and -not $Force) { - Add-LogMessage -Level Warning "The existing VM appears to be using the same image version, no upgrade is needed. Use -Force to upgrade anyway." - $null = Set-AzContext -Context $originalContext -ErrorAction Stop - exit 0 - } - - # Stop and remove the existing VM - Stop-VM -Name $existingVm.Name -ResourceGroupName $existingVm.ResourceGroupName - Add-LogMessage -Level Info "[ ] Removing existing VM '$($existingVm.Name)'" - $null = Remove-VirtualMachine -Name $existingVm.Name -ResourceGroupName $existingVm.ResourceGroupName -Force - if ($?) { - Add-LogMessage -Level Success "Removal of VM '$($existingVm.Name)' succeeded" - } else { - Add-LogMessage -Level Fatal "Removal of VM '$($existingVm.Name)' failed!" 
- } - - # Remove the existing NIC - if ($existingNic) { - Add-LogMessage -Level Info "[ ] Deleting existing network card '$($existingNic.Name)'" - $null = Remove-AzNetworkInterface -Name $existingNic.Name -ResourceGroupName $existingNic.ResourceGroupName -Force - if ($?) { - Add-LogMessage -Level Success "Removal of network card '$($existingNic.Name)' succeeded" - } else { - Add-LogMessage -Level Fatal "Removal of network card '$($existingNic.Name)' failed!" - } - } - - # Remove the existing disks - foreach ($diskType in @("OS")) { - Add-LogMessage -Level Info "[ ] Removing '$diskType' disks" - foreach ($disk in $(Get-AzDisk | Where-Object { $_.Name -match "$vmNamePrefix-\d-\d-\d{10}-$diskType-DISK" })) { - $null = $disk | Remove-AzDisk -Force - if ($?) { - Add-LogMessage -Level Success "Removal of '$($disk.Name)' succeeded" - } else { - Add-LogMessage -Level Fatal "Removal of '$($disk.Name)' failed!" - } - } - } -} - - -# Check for any orphaned disks -# ---------------------------- -$orphanedDisks = Get-AzDisk | Where-Object { $_.DiskState -eq "Unattached" } | Where-Object { $_.Name -Like "${$vmNamePrefix}*" } -if ($orphanedDisks) { - Add-LogMessage -Level Info "Removing $($orphanedDisks.Length) orphaned disks" - $null = $orphanedDisks | Remove-AzDisk -Force - if ($?) { - Add-LogMessage -Level Success "Orphaned disk removal succeeded" - } else { - Add-LogMessage -Level Fatal "Orphaned disk removal failed!" - } -} - - -# Check that this is a valid image version and get its ID -# ------------------------------------------------------- -$image = Get-ImageFromGallery -GalleryName $config.shm.srdImage.gallery.name ` - -ImageSku $config.sre.srd.vmImage.type ` - -ImageVersion $config.sre.srd.vmImage.version ` - -ResourceGroupName $config.shm.srdImage.gallery.rg ` - -Subscription $config.shm.srdImage.subscription - - -# Set the OS disk size for this image -# ----------------------------------- -$osDiskSizeGB = $config.sre.srd.disks.os.sizeGb -if ($osDiskSizeGB -eq "default") { $osDiskSizeGB = 2 * [int]($image.StorageProfile.OsDiskImage.SizeInGB) } -if ([int]$osDiskSizeGB -lt [int]$image.StorageProfile.OsDiskImage.SizeInGB) { - Add-LogMessage -Level Fatal "Image $($image.Name) needs an OS disk of at least $($image.StorageProfile.OsDiskImage.SizeInGB) GB!" -} - - -# Retrieve passwords from the Key Vault -# ------------------------------------- -Add-LogMessage -Level Info "Creating/retrieving secrets from Key Vault '$($config.sre.keyVault.name)'..." 
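# Resolve-KeyVaultSecret (from the AzureKeyVault module imported above) appears to return the named
# secret if it already exists and otherwise to create it, either with a random value of -DefaultLength
# characters or with the supplied -DefaultValue; -AsPlaintext returns the raw string so that it can be
# injected into the cloud-init template constructed below.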
-$domainJoinPassword = Resolve-KeyVaultSecret -VaultName $config.shm.keyVault.name -SecretName $config.shm.users.computerManagers.linuxServers.passwordSecretName -DefaultLength 20 -AsPlaintext -$backupContainerSasToken = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $config.sre.storage.persistentdata.containers.backup.connectionSecretName -AsPlaintext -$ingressContainerSasToken = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $config.sre.storage.persistentdata.containers.ingress.connectionSecretName -AsPlaintext -$egressContainerSasToken = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $config.sre.storage.persistentdata.containers.egress.connectionSecretName -AsPlaintext -$ldapSearchPassword = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $config.sre.users.serviceAccounts.ldapSearch.passwordSecretName -DefaultLength 20 -AsPlaintext -$vmAdminUsername = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $config.sre.keyVault.secretNames.adminUsername -DefaultValue "sre$($config.sre.id)admin".ToLower() -AsPlaintext - - -# Construct the cloud-init YAML file for the target subscription -# -------------------------------------------------------------- -Add-LogMessage -Level Info "Constructing cloud-init from template..." -$cloudInitBasePath = Join-Path $PSScriptRoot ".." "cloud_init" -Resolve -$cloudInitFilePath = Get-ChildItem -Path $cloudInitBasePath | Where-Object { $_.Name -eq "cloud-init-srd-shm-${shmId}-sre-${sreId}.mustache.yaml" } | ForEach-Object { $_.FullName } -if (-not $cloudInitFilePath) { $cloudInitFilePath = Join-Path $cloudInitBasePath "cloud-init-srd.mustache.yaml" } -# Load the cloud-init template then add resources and expand mustache placeholders -$config["srd"] = @{ - domainJoinPassword = $domainJoinPassword - ldapUserFilter = "(&(objectClass=user)(memberOf=CN=$($config.sre.domain.securityGroups.researchUsers.name),$($config.shm.domain.ous.securityGroups.path)))" - ldapSearchUserDn = "CN=$($config.sre.users.serviceAccounts.ldapSearch.name),$($config.shm.domain.ous.serviceAccounts.path)" - ldapSearchUserPassword = $ldapSearchPassword - backupContainerSasToken = $backupContainerSasToken - ingressContainerSasToken = $ingressContainerSasToken - egressContainerSasToken = $egressContainerSasToken - hostname = ($vmHostname | Limit-StringLength -MaximumLength 15) - ipAddress = $finalIpAddress - xrdpCustomLogoEncoded = (ConvertTo-Base64GZip -Path (Join-Path $cloudInitBasePath "resources" "xrdp_custom_logo.bmp")) -} -$cloudInitTemplate = Get-Content $cloudInitFilePath -Raw -$cloudInitTemplate = Expand-CloudInitResources -Template $cloudInitTemplate -ResourcePath (Join-Path $cloudInitBasePath "resources") -$cloudInitTemplate = Expand-CloudInitResources -Template $cloudInitTemplate -ResourcePath (Join-Path ".." ".." 
"common" "resources") -$cloudInitTemplate = Expand-MustacheTemplate -Template $cloudInitTemplate -Parameters $config - - -# Deploy the VM -# ------------- -$bootDiagnosticsAccount = Deploy-StorageAccount -Name $config.sre.storage.bootdiagnostics.accountName -ResourceGroupName $config.sre.storage.bootdiagnostics.rg -Location $config.sre.location -$networkCard = Deploy-NetworkInterface -Name "$vmName-NIC" -ResourceGroupName $config.sre.srd.rg -Subnet $deploymentSubnet -PrivateIpAddress $deploymentIpAddress -Location $config.sre.location -$dataDisks = @( - (Deploy-ManagedDisk -Name "$vmName-SCRATCH-DISK" -SizeGB $config.sre.srd.disks.scratch.sizeGb -Type $config.sre.srd.disks.scratch.type -ResourceGroupName $config.sre.srd.rg -Location $config.sre.location) -) -$params = @{ - Name = $vmName - Size = $vmSize - AdminPassword = (Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $config.sre.srd.adminPasswordSecretName -DefaultLength 20) - AdminUsername = $vmAdminUsername - BootDiagnosticsAccount = $bootDiagnosticsAccount - CloudInitYaml = $cloudInitTemplate - location = $config.sre.location - NicId = $networkCard.Id - OsDiskSizeGb = $osDiskSizeGB - OsDiskType = $config.sre.srd.disks.os.type - ResourceGroupName = $config.sre.srd.rg - DataDiskIds = ($dataDisks | ForEach-Object { $_.Id }) - ImageId = $image.Id -} -$vm = Deploy-LinuxVirtualMachine @params -$null = New-AzTag -ResourceId $vm.Id -Tag @{"Build commit hash" = $image.Tags["Build commit hash"] } - - -# Change subnets and IP address while the VM is off -# ------------------------------------------------- -Update-VMIpAddress -Name $vmName -ResourceGroupName $config.sre.srd.rg -Subnet $computeSubnet -IpAddress $finalIpAddress -# Update DNS records for this VM -Update-VMDnsRecords -DcName $config.shm.dc.vmName -DcResourceGroupName $config.shm.dc.rg -BaseFqdn $config.shm.domain.fqdn -ShmSubscriptionName $config.shm.subscriptionName -VmHostname $vmHostname -VmIpAddress $finalIpAddress - - -# Restart after the networking switch -# ----------------------------------- -Start-VM -Name $vmName -ResourceGroupName $config.sre.srd.rg -Wait-For -Target "domain joining to complete" -Seconds 120 - - -# Upload smoke tests to SRD -# ------------------------- -Add-LogMessage -Level Info "Creating smoke test package for the SRD..." -# Arrange files in temporary directory -$localSmokeTestDir = New-Item -ItemType Directory -Path (Join-Path ([System.IO.Path]::GetTempPath()) ([System.IO.Path]::GetRandomFileName()) "smoke_tests") -Copy-Item (Join-Path $PSScriptRoot ".." ".." "secure_research_desktop" "packages") -Filter *.* -Destination (Join-Path $localSmokeTestDir "package_lists") -Recurse -Copy-Item (Join-Path $PSScriptRoot ".." ".." ".." "tests" "srd_smoke_tests") -Filter *.* -Destination (Join-Path $localSmokeTestDir "tests") -Recurse -# Expand mustache templates -$PythonYaml = (ConvertFrom-Yaml (Get-Content -Raw (Join-Path $PSScriptRoot ".." ".." "secure_research_desktop" "packages" "packages-python.yaml"))) -$MssqlConfig = $config.sre.databases.instances | Where-Object { $_.type -eq "MSSQL" } | Select-Object -First 1 -$PostgresqlConfig = $config.sre.databases.instances | Where-Object { $_.type -eq "PostgreSQL" } | Select-Object -First 1 -$config["SmokeTests"] = [ordered]@{ - MSSQLExists = $MssqlConfig.Count -gt 0 - MSSQLPort = $MssqlConfig ? $MssqlConfig.port : "" - MSSQLVMName = $MssqlConfig ? $MssqlConfig.vmName : "" - PostgreSQLExists = $PostgresqlConfig.Count -gt 0 - PostgreSQLPort = $PostgresqlConfig ? 
$PostgresqlConfig.port : "" - PostgreSQLVMName = $PostgresqlConfig ? $PostgresqlConfig.vmName : "" - PyPIPackageFirst = Get-Content (Join-Path $PSScriptRoot ".." ".." ".." "environment_configs" "package_lists" "allowlist-full-python-pypi-tier3.list") -Head 1 - PyPIPackageLast = Get-Content (Join-Path $PSScriptRoot ".." ".." ".." "environment_configs" "package_lists" "allowlist-full-python-pypi-tier3.list") -Tail 1 - Python_v0 = $PythonYaml["versions"][0] - Python_v1 = $PythonYaml["versions"][1] - Python_v2 = $PythonYaml["versions"][2] - TestFailures = $config.sre.tier -ge 3 ? 1 : 0 -} -foreach ($MustacheFilePath in (Get-ChildItem -Path $localSmokeTestDir -Include *.mustache.* -File -Recurse)) { - $ExpandedFilePath = $MustacheFilePath -replace ".mustache.", "." - Expand-MustacheTemplate -TemplatePath $MustacheFilePath -Parameters $config | Set-Content -Path $ExpandedFilePath - Remove-Item -Path $MustacheFilePath -} -Move-Item -Path (Join-Path $localSmokeTestDir "tests" "run_all_tests.bats") -Destination $localSmokeTestDir -Move-Item -Path (Join-Path $localSmokeTestDir "tests" "README.md") -Destination $localSmokeTestDir -# Upload files to VM via the SRE artifacts storage account (note that this requires access to be allowed from both the deployment machine and the SRD) -$artifactsStorageAccount = Get-StorageAccount -Name $config.sre.storage.artifacts.account.name -ResourceGroupName $config.sre.storage.artifacts.rg -SubscriptionName $config.sre.subscriptionName -ErrorAction Stop -Send-FilesToLinuxVM -LocalDirectory $localSmokeTestDir -RemoteDirectory "/opt/tests" -VMName $vmName -VMResourceGroupName $config.sre.srd.rg -BlobStorageAccount $artifactsStorageAccount -Remove-Item -Path $localSmokeTestDir -Recurse -Force -# Set smoke test permissions -Add-LogMessage -Level Info "[ ] Set smoke test permissions on $vmName" -$scriptPath = Join-Path $PSScriptRoot ".." "remote" "secure_research_desktop" "scripts" "set_smoke_test_permissions.sh" -$null = Invoke-RemoteScript -Shell "UnixShell" -ScriptPath $scriptPath -VMName $vmName -ResourceGroupName $config.sre.srd.rg - - -# Run remote diagnostic scripts -# ----------------------------- -Invoke-Command -ScriptBlock { & "$(Join-Path $PSScriptRoot 'Run_SRE_SRD_Remote_Diagnostics.ps1')" -shmId $shmId -sreId $sreId -ipLastOctet $ipLastOctet } - - -# Update Guacamole dashboard to include this new VM -# ------------------------------------------------- -if ($config.sre.remoteDesktop.provider -eq "ApacheGuacamole") { - Invoke-Command -ScriptBlock { & "$(Join-Path $PSScriptRoot 'Update_SRE_Guacamole_Dashboard.ps1')" -shmId $shmId -sreId $sreId } -} - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/secure_research_environment/setup/Apply_SRE_Network_Configuration.ps1 b/deployment/secure_research_environment/setup/Apply_SRE_Network_Configuration.ps1 deleted file mode 100644 index 4e5b7b9b3e..0000000000 --- a/deployment/secure_research_environment/setup/Apply_SRE_Network_Configuration.ps1 +++ /dev/null @@ -1,153 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $true, HelpMessage = "Enter SRE ID (e.g. 
'sandbox')")] - [string]$sreId -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.Compute -ErrorAction Stop -Import-Module Az.Network -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureCompute -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureNetwork -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-SreConfig -shmId $shmId -sreId $sreId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - - -# Get common parameters -# --------------------- -$allowedSources = ($config.sre.remoteDesktop.networkRules.allowedSources.Split(',') | ForEach-Object { $_.Trim() }) # NB. Use an array, splitting on commas and trimming any whitespace from each item to avoid "invalid Address prefix" errors caused by extraneous whitespace -$outboundInternetAccessRuleName = "$($config.sre.remoteDesktop.networkRules.outboundInternet)ExternalInternetOutbound" -$inboundApprovedUsersRuleName = "AllowUsersApprovedHttpsInbound" -$nsgs = @{} - - -# Ensure VMs are bound to correct NSGs -# ------------------------------------ -Add-LogMessage -Level Info "Applying network configuration for SRE '$($config.sre.id)' (Tier $($config.sre.tier)), hosted on subscription '$($config.sre.subscriptionName)'" - - -# ApacheGuacamole has several NSGs -# -------------------------------- -# Remote desktop -if ($config.sre.remoteDesktop.provider -eq "ApacheGuacamole") { - # RDS gateway - Add-LogMessage -Level Info "Ensure Guacamole server is bound to correct NSG..." - $remoteDesktopSubnet = Get-Subnet -Name $config.sre.network.vnet.subnets.remoteDesktop.name -VirtualNetworkName $config.sre.network.vnet.name -ResourceGroupName $config.sre.network.vnet.rg - $nsgs["remoteDesktop"] = Get-AzNetworkSecurityGroup -Name $config.sre.network.vnet.subnets.remoteDesktop.nsg.name -ResourceGroupName $config.sre.network.vnet.rg -ErrorAction Stop - if ($remoteDesktopSubnet.NetworkSecurityGroup.Id -eq $nsgs["remoteDesktop"].Id) { - Add-LogMessage -Level Info "Guacamole server is bound to NSG '$($nsgs["remoteDesktop"].Name)'" - } else { - Add-LogMessage -Level Fatal "Guacamole server is not bound to NSG '$($nsgs["remoteDesktop"].Name)'!" - } -} else { - Add-LogMessage -Level Fatal "Remote desktop type '$($config.sre.remoteDesktop.type)' was not recognised!" -} - -# Database servers -Add-LogMessage -Level Info "Ensure database servers are bound to correct NSG..." -$databaseSubnet = Get-Subnet -Name $config.sre.network.vnet.subnets.databases.name -VirtualNetworkName $config.sre.network.vnet.name -ResourceGroupName $config.sre.network.vnet.rg -$nsgs["databases"] = Get-AzNetworkSecurityGroup -Name $config.sre.network.vnet.subnets.databases.nsg.name -ResourceGroupName $config.sre.network.vnet.rg -ErrorAction Stop -if ($databaseSubnet.NetworkSecurityGroup.Id -eq $nsgs["databases"].Id) { - Add-LogMessage -Level Info "Database servers are bound to NSG '$($nsgs["databases"].Name)'" -} else { - Add-LogMessage -Level Fatal "Database servers are not bound to NSG '$($nsgs["databases"].Name)'!" -} - -# Webapp servers -Add-LogMessage -Level Info "Ensure webapp servers are bound to correct NSG..." 
-$webappsSubnet = Get-Subnet -Name $config.sre.network.vnet.subnets.webapps.name -VirtualNetworkName $config.sre.network.vnet.name -ResourceGroupName $config.sre.network.vnet.rg -$nsgs["webapps"] = Get-AzNetworkSecurityGroup -Name $config.sre.network.vnet.subnets.webapps.nsg.name -ResourceGroupName $config.sre.network.vnet.rg -ErrorAction Stop -if ($webappsSubnet.NetworkSecurityGroup.Id -eq $nsgs["webapps"].Id) { - Add-LogMessage -Level Info "Webapp servers are bound to NSG '$($nsgs["webapps"].Name)'" -} else { - Add-LogMessage -Level Fatal "Webapp servers are not bound to NSG '$($nsgs["webapps"].Name)'!" -} - -# SRDs -Add-LogMessage -Level Info "Ensure SRDs are bound to correct NSG..." -$computeSubnet = Get-Subnet -Name $config.sre.network.vnet.subnets.compute.name -VirtualNetworkName $config.sre.network.vnet.name -ResourceGroupName $config.sre.network.vnet.rg -$nsgs["compute"] = Get-AzNetworkSecurityGroup -Name $config.sre.network.vnet.subnets.compute.nsg.name -ResourceGroupName $config.sre.network.vnet.rg -ErrorAction Stop -if ($computeSubnet.NetworkSecurityGroup.Id -eq $nsgs["compute"].Id) { - Add-LogMessage -Level Info "SRDs are bound to NSG '$($nsgs["compute"].Name)'" -} else { - Add-LogMessage -Level Fatal "SRDs are not bound to NSG '$($nsgs["compute"].Name)'!" -} - -# Update remote desktop server NSG rules -if ($config.sre.remoteDesktop.provider -eq "ApacheGuacamole") { - Add-LogMessage -Level Info "Setting inbound connection rules on Guacamole NSG..." - $null = Update-NetworkSecurityGroupRule -Name $inboundApprovedUsersRuleName -NetworkSecurityGroup $nsgs["remoteDesktop"] -SourceAddressPrefix $allowedSources -} else { - Add-LogMessage -Level Fatal "Remote desktop type '$($config.sre.remoteDesktop.type)' was not recognised!" -} - -# Update user-facing NSG rules -Add-LogMessage -Level Info "Setting outbound internet rules on user-facing NSGs..." -$null = Update-NetworkSecurityGroupRule -Name $outboundInternetAccessRuleName -NetworkSecurityGroup $nsgs["compute"] -Access $config.sre.remoteDesktop.networkRules.outboundInternet -$null = Update-NetworkSecurityGroupRule -Name $outboundInternetAccessRuleName -NetworkSecurityGroup $nsgs["webapps"] -Access $config.sre.remoteDesktop.networkRules.outboundInternet - - -# List all NICs associated with each NSG -# -------------------------------------- -foreach ($nsgName in $nsgs.Keys) { - Add-LogMessage -Level Info "NICs associated with $($nsgs[$nsgName].Name):" - @($nsgs[$nsgName].NetworkInterfaces) | ForEach-Object { Add-LogMessage -Level Info "=> $($_.Id.Split('/')[-1])" } - foreach ($linkedSubnet in $nsgs[$nsgName].Subnets) { - $subnet = Get-Subnet -Name $linkedSubnet.Id.Split("/")[-1] -VirtualNetworkName $config.sre.network.vnet.name -ResourceGroupName $config.sre.network.vnet.rg - $null = $subnet.IpConfigurations.Id | Where-Object { $_ } | ForEach-Object { Add-LogMessage -Level Info "=> $($_.Split('/')[-3])" } - } -} - - -# Ensure SRE is peered to correct mirror/proxy set -# ------------------------------------------------ -# Unpeer any existing networks before (re-)establishing correct peering for SRE -& $(Join-Path $PSScriptRoot "Unpeer_SRE_Package_Repositories.ps1") -shmId $shmID -sreId $sreId -# Peer this SRE to the repository network -Add-LogMessage -Level Info "Ensuring SRE is peered to correct package repository..." -if (-not $config.sre.repositories.network.name) { - Add-LogMessage -Level InfoSuccess "No package repository network is configured for SRE $($config.sre.id) [tier $($config.sre.tier)]. Nothing to do." 
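Set-VnetPeering is a repository helper whose internals are not shown here; under the hood, two-way VNet peering with stock Az cmdlets looks roughly like the sketch below. It assumes both networks are visible from the current context (the original spans two subscriptions) and every name is a placeholder:

Import-Module Az.Network -ErrorAction Stop

$sreVnet  = Get-AzVirtualNetwork -Name "VNET-SRE-SANDBOX" -ResourceGroupName "RG-SRE-NETWORKING"
$repoVnet = Get-AzVirtualNetwork -Name "VNET-SHM-REPOSITORIES" -ResourceGroupName "RG-SHM-NETWORKING"

# Peering must be created on both sides before it reports as 'Connected'
Add-AzVirtualNetworkPeering -Name "PEER_$($repoVnet.Name)" -VirtualNetwork $sreVnet -RemoteVirtualNetworkId $repoVnet.Id
Add-AzVirtualNetworkPeering -Name "PEER_$($sreVnet.Name)" -VirtualNetwork $repoVnet -RemoteVirtualNetworkId $sreVnet.Id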
-} else { - Set-VnetPeering -Vnet1Name $config.sre.network.vnet.name ` - -Vnet1ResourceGroupName $config.sre.network.vnet.rg ` - -Vnet1SubscriptionName $config.sre.subscriptionName ` - -Vnet2Name $config.sre.repositories.network.name ` - -Vnet2ResourceGroupName $config.shm.network.vnet.rg ` - -Vnet2SubscriptionName $config.shm.subscriptionName -} - - -# Update SRE package repository details -# ------------------------------------- -# Set PyPI and CRAN locations on the SRD -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop -$scriptPath = Join-Path $PSScriptRoot ".." "remote" "network_configuration" "scripts" "update_mirror_settings.sh" -$repositoryFacingVms = Get-AzVM | Where-Object ($_.ResourceGroupName -eq $config.sre.srd.rg) -foreach ($VM in $repositoryFacingVms) { - Add-LogMessage -Level Info "Ensuring that PyPI and CRAN locations are set correctly on $($VM.Name)" - $params = @{ - CRAN_MIRROR_INDEX_URL = $config.sre.repositories.cran.url - PYPI_MIRROR_INDEX = $config.sre.repositories.pypi.index - PYPI_MIRROR_INDEX_URL = $config.sre.repositories.pypi.indexUrl - PYPI_MIRROR_HOST = $config.sre.repositories.pypi.host - } - $null = Invoke-RemoteScript -Shell "UnixShell" -ScriptPath $scriptPath -VMName $VM.Name -ResourceGroupName $VM.ResourceGroupName -Parameter $params -} -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - - -# Block external DNS queries -# -------------------------- -& $(Join-Path $PSScriptRoot "Configure_External_DNS_Queries.ps1") -shmId $shmId -sreId $sreId - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/secure_research_environment/setup/Configure_External_DNS_Queries.ps1 b/deployment/secure_research_environment/setup/Configure_External_DNS_Queries.ps1 deleted file mode 100644 index ef12503dc2..0000000000 --- a/deployment/secure_research_environment/setup/Configure_External_DNS_Queries.ps1 +++ /dev/null @@ -1,101 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $true, HelpMessage = "Enter SRE ID (e.g. 'sandbox')")] - [string]$sreId, - [Parameter(Mandatory = $false, HelpMessage = "Last octet of IP address for SRD to test DNS lockdown. Defaults to '160'")] - [string]$srdIpLastOctet -) -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.Compute -ErrorAction Stop -Import-Module Az.Network -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureCompute -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzurePrivateDns -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Templates -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-SreConfig -shmId $shmId -sreId $sreId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - -# Construct list of always-allowed FQDNs -# -------------------------------------- -$firewallRules = Get-JsonFromMustacheTemplate -TemplatePath (Join-Path $PSScriptRoot ".." ".." 
"safe_haven_management_environment" "network_rules" "shm-firewall-rules.json") -Parameters $config.shm -AsHashtable -$allowedFqdns = @($firewallRules.applicationRuleCollections | ForEach-Object { $_.properties.rules.targetFqdns }) + - @(Get-PrivateDnsZones -ResourceGroupName $config.shm.network.vnet.rg -SubscriptionName $config.shm.subscriptionName | ForEach-Object { $_.Name }) + - @("docker.io") -# List all unique FQDNs -$allowedFqdns = $allowedFqdns | - Where-Object { $_ -notlike "*-sb.servicebus.windows.net" } | # Remove AzureADConnect password reset endpoints - Where-Object { $_ -notlike "pksproddatastore*.blob.core.windows.net" } | # Remove AzureAD operations endpoints - Sort-Object -Unique -Add-LogMessage -Level Info "Restricted networks will be allowed to run DNS lookup on the following $($allowedFqdns.Count) FQDNs:" -foreach ($allowedFqdn in $allowedFqdns) { Add-LogMessage -Level Info "... $allowedFqdn" } -# Allow DNS resolution for arbitrary subdomains under a private link -# Note: this does NOT guarantee that we control the subdomain, but there is currently no way to dynamically resolve only those subdomains belonging to the private link -$allowedFqdns = $allowedFqdns | ForEach-Object { $_.Replace("privatelink", "*") } - - -# Construct lists of CIDRs to apply restrictions to -# ------------------------------------------------- -if ($config.sre.remoteDesktop.networkRules.outboundInternet -eq "Allow") { - $cidrsToRestrict = @() - $cidrsToAllow = @($config.sre.network.vnet.subnets.compute.cidr, $config.sre.network.vnet.subnets.databases.cidr, $config.sre.network.vnet.subnets.deployment.cidr, $config.sre.network.vnet.subnets.webapps.cidr) -} else { - $cidrsToRestrict = @($config.sre.network.vnet.subnets.compute.cidr, $config.sre.network.vnet.subnets.databases.cidr, $config.sre.network.vnet.subnets.webapps.cidr) - $cidrsToAllow = @($config.sre.network.vnet.subnets.deployment.cidr) -} - - -# Configure external DNS resolution for SRDs via SHM DNS servers -# -------------------------------------------------------------- -$null = Set-AzContext -SubscriptionId $config.shm.subscriptionName -ErrorAction Stop -$params = @{ - AllowedFqdnsCommaSeparatedList = ($allowedFqdns -join ",") - RestrictedCidrsCommaSeparatedList = ($cidrsToRestrict -join ",") - SreId = $config.sre.id - UnrestrictedCidrsCommaSeparatedList = ($cidrsToAllow -join ",") -} -$scriptPath = Join-Path $PSScriptRoot ".." "remote" "network_configuration" "scripts" "Configure_External_DNS_Queries_Remote.ps1" -foreach ($dnsServerName in @($config.shm.dc.vmName, $config.shm.dcb.vmName)) { - Add-LogMessage -Level Info "Configuring external DNS resolution for SRDs via ${dnsServerName}..." - $null = Invoke-RemoteScript -Shell "PowerShell" -ScriptPath $scriptPath -VMName $dnsServerName -ResourceGroupName $config.shm.dc.rg -Parameter $params -} -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - - -# Validate external DNS resolution is blocked from SRDs -# ----------------------------------------------------- -# Get VM for provided IP address -$computeVmIds = @(Get-AzVM -ResourceGroupName $config.sre.srd.rg | ForEach-Object { $_.Id }) -$computeVmIpAddresses = @(Get-AzNetworkInterface | Where-Object { $_.VirtualMachine.Id -in $computeVmIds } | ForEach-Object { $_.IpConfigurations.PrivateIpAddress }) -if (-not $srdIpLastOctet) { - $srdIpLastOctet = $computeVmIpAddresses[0].Split(".")[3] - Add-LogMessage -Level Warning "Test SRD not specified by providing last octet of its IP address. 
Attempting to test on SRD with last octet of '$srdIpLastOctet'." -} -$vmIpAddress = @($computeVmIpAddresses | Where-Object { $_.Split(".")[3] -eq $srdIpLastOctet })[0] -Add-LogMessage -Level Info "Looking for SRD with IP address '$vmIpAddress'..." -if (-not $vmIpAddress) { - Add-LogMessage -Level Fatal "No SRD found with IP address '$vmIpAddress'. Cannot run test to confirm external DNS resolution." -} else { - # Match on IP address within approriate SRE resource group - $vmName = @(Get-AzNetworkInterface -ResourceGroupName $config.sre.srd.rg | Where-Object { $_.IpConfigurations.PrivateIpAddress -eq $vmIpAddress } | ForEach-Object { $_.VirtualMachine.Id.Split("/")[-1] })[0] - Add-LogMessage -Level Info "Testing external DNS resolution on VM '$vmName'..." - $params = @{ - SHM_DOMAIN_FQDN = $config.shm.domain.fqdn - SHM_DC1_FQDN = $config.shm.dc.fqdn - SHM_DC2_FQDN = $config.shm.dcb.fqdn - OUTBOUND_INTERNET = $config.sre.remoteDesktop.networkRules.outboundInternet - } - $scriptPath = Join-Path $PSScriptRoot ".." "remote" "network_configuration" "scripts" "test_external_dns_resolution_fails.sh" - $null = Invoke-RemoteScript -Shell "UnixShell" -ScriptPath $scriptPath -VMName $vmName -ResourceGroupName $config.sre.srd.rg -Parameter $params -} - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/secure_research_environment/setup/Deploy_SRE.ps1 b/deployment/secure_research_environment/setup/Deploy_SRE.ps1 deleted file mode 100644 index 5c8d887380..0000000000 --- a/deployment/secure_research_environment/setup/Deploy_SRE.ps1 +++ /dev/null @@ -1,154 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $true, HelpMessage = "Enter SRE ID (e.g. 'sandbox'). Maximum of seven alphanumeric characters")] - [string]$sreId, - [Parameter(Mandatory = $true, HelpMessage = "Array of sizes of SRDs to deploy. For example: 'Standard_D2s_v3', 'default', 'Standard_NC6s_v3'")] - [string[]]$VmSizes, - [Parameter(Mandatory = $false, HelpMessage = "Remove any remnants of previous deployments of this SRE from the SHM")] - [switch]$Clean, - [Parameter(Mandatory = $false, HelpMessage = "Use device authentication for connecting to Azure and Microsoft Graph")] - [switch]$UseDeviceAuthentication -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Microsoft.Graph.Authentication -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop - - -# Connect to Azure -# ---------------- -if (Get-AzContext) { Disconnect-AzAccount | Out-Null } # force a refresh of the Azure token before starting -Add-LogMessage -Level Info "Attempting to authenticate with Azure. Please sign in with an account with admin rights over the subscriptions you plan to use." 
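The script that ends just above, like every script in this set, finishes by restoring the caller's Az context. That save/switch/restore discipline reduces to the following self-contained pattern; the subscription name is a placeholder, and the try/finally (which the originals do not use) is simply one way to guarantee the restore:

Import-Module Az.Accounts -ErrorAction Stop

$originalContext = Get-AzContext
$null = Set-AzContext -SubscriptionId "SRE Sandbox Subscription" -ErrorAction Stop
try {
    # ... operate on resources in the SRE subscription ...
} finally {
    # Put back whatever context the caller had, even if the work above failed
    $null = Set-AzContext -Context $originalContext -ErrorAction Stop
}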
-if ($UseDeviceAuthentication) { - Connect-AzAccount -UseDeviceAuthentication -ErrorAction Stop -} else { - Connect-AzAccount -ErrorAction Stop -} -if (Get-AzContext) { - Add-LogMessage -Level Success "Authenticated with Azure as $((Get-AzContext).Account.Id)" -} else { - Add-LogMessage -Level Fatal "Failed to authenticate with Azure" -} - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-SreConfig -shmId $shmId -sreId $sreId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - - -# Connect to Microsoft Graph -# -------------------------- -if (Get-MgContext) { Disconnect-MgGraph | Out-Null } # force a refresh of the Microsoft Graph token before starting -Add-LogMessage -Level Info "Attempting to authenticate with Microsoft Graph. Please sign in with an account with admin rights over the Azure Active Directory you plan to use." -if ($UseDeviceAuthentication) { - Connect-MgGraph -TenantId $config.shm.azureAdTenantId -Scopes "Application.ReadWrite.All", "Policy.ReadWrite.ApplicationConfiguration" -UseDeviceAuthentication -ErrorAction Stop -ContextScope Process -} else { - Connect-MgGraph -TenantId $config.shm.azureAdTenantId -Scopes "Application.ReadWrite.All", "Policy.ReadWrite.ApplicationConfiguration" -ErrorAction Stop -ContextScope Process -} -if (Get-MgContext) { - Add-LogMessage -Level Success "Authenticated with Microsoft Graph as $((Get-MgContext).Account)" -} else { - Add-LogMessage -Level Fatal "Failed to authenticate with Microsoft Graph" -} - - -# Check that we are using the correct provider -# -------------------------------------------- -if ($config.sre.remoteDesktop.provider -ne "ApacheGuacamole") { - Add-LogMessage -Level Fatal "You should not be running this script when using remote desktop provider '$($config.sre.remoteDesktop.provider)'" -} - - -# Check Powershell requirements -# ----------------------------- -Invoke-Command -ScriptBlock { & "$(Join-Path $PSScriptRoot '..' '..' 
'CheckRequirements.ps1')" } - - -# Remove data from previous deployments -# ------------------------------------- -if ($Clean) { - Invoke-Command -ScriptBlock { & "$(Join-Path $PSScriptRoot 'Remove_SRE_Data_From_SHM.ps1')" -shmId $shmId -sreId $sreId } -} - - -# Deploy the SRE KeyVault and register users with the SHM -# ------------------------------------------------------- -Invoke-Command -ScriptBlock { & "$(Join-Path $PSScriptRoot 'Setup_SRE_Key_Vault_And_Users.ps1')" -shmId $shmId -sreId $sreId } - - -# Create SRE DNS Zone -# ------------------- -Invoke-Command -ScriptBlock { & "$(Join-Path $PSScriptRoot 'Setup_SRE_DNS_Zone.ps1')" -shmId $shmId -sreId $sreId } - - -# Deploy the virtual network -# -------------------------- -Invoke-Command -ScriptBlock { & "$(Join-Path $PSScriptRoot 'Setup_SRE_Networking.ps1')" -shmId $shmId -sreId $sreId } - - -# Deploy storage accounts -# ----------------------- -Invoke-Command -ScriptBlock { & "$(Join-Path $PSScriptRoot 'Setup_SRE_Storage_Accounts.ps1')" -shmId $shmId -sreId $sreId } - - -# Deploy Guacamole remote desktop -# ------------------------------- -Invoke-Command -ScriptBlock { & "$(Join-Path $PSScriptRoot 'Setup_SRE_Guacamole_Servers.ps1')" -shmId $shmId -sreId $sreId } - - -# Update SSL certificate -# ---------------------- -Invoke-Command -ScriptBlock { & "$(Join-Path $PSScriptRoot 'Update_SRE_SSL_Certificate.ps1')" -shmId $shmId -sreId $sreId } - - -# Deploy web applications (GitLab and CodiMD) -# ------------------------------------------- -Invoke-Command -ScriptBlock { & "$(Join-Path $PSScriptRoot 'Setup_SRE_WebApp_Servers.ps1')" -shmId $shmId -sreId $sreId } - - -# Deploy databases -# ---------------- -Invoke-Command -ScriptBlock { & "$(Join-Path $PSScriptRoot 'Setup_SRE_Databases.ps1')" -shmId $shmId -sreId $sreId } - - -# Deploy SRD VMs -# -------------- -$cpuIpOffset = 160 -$gpuIpOffset = 180 -foreach ($VmSize in $VmSizes) { - if ($VmSize.Replace("Standard_", "").StartsWith("N")) { - Invoke-Command -ScriptBlock { & "$(Join-Path $PSScriptRoot 'Add_Single_SRD.ps1')" -shmId $shmId -sreId $sreId -ipLastOctet $gpuIpOffset -vmSize $VmSize } - $gpuIpOffset += 1 - } else { - Invoke-Command -ScriptBlock { & "$(Join-Path $PSScriptRoot 'Add_Single_SRD.ps1')" -shmId $shmId -sreId $sreId -ipLastOctet $cpuIpOffset -vmSize $VmSize } - $cpuIpOffset += 1 - } -} - -# Configure network lockdown -# -------------------------- -Invoke-Command -ScriptBlock { & "$(Join-Path $PSScriptRoot 'Apply_SRE_Network_Configuration.ps1')" -shmId $shmId -sreId $sreId } - - -# Configure firewall -# ------------------ -Invoke-Command -ScriptBlock { & "$(Join-Path $PSScriptRoot 'Setup_SRE_Firewall.ps1')" -shmId $shmId -sreId $sreId } - - -# Configure monitoring -# -------------------- -Invoke-Command -ScriptBlock { & "$(Join-Path $PSScriptRoot 'Setup_SRE_Monitoring.ps1')" -shmId $shmId -sreId $sreId } - - -# Enable backup -# ------------- -Invoke-Command -ScriptBlock { & "$(Join-Path $PSScriptRoot 'Setup_SRE_Backup.ps1')" -shmId $shmId -sreId $sreId } - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/secure_research_environment/setup/Remove_SRE_Data_From_SHM.ps1 b/deployment/secure_research_environment/setup/Remove_SRE_Data_From_SHM.ps1 deleted file mode 100644 index 48a4b00ea4..0000000000 --- a/deployment/secure_research_environment/setup/Remove_SRE_Data_From_SHM.ps1 +++ /dev/null @@ -1,175 +0,0 @@ -param( - [Parameter(Mandatory = $true, 
HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $true, HelpMessage = "Enter SRE ID (e.g. 'sandbox')")] - [string]$sreId -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.Dns -ErrorAction Stop -Import-Module Az.Network -ErrorAction Stop -Import-Module Az.Resources -ErrorAction Stop -Import-Module Az.Storage -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureCompute -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Cryptography -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/DataStructures -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-SreConfig -shmId $shmId -sreId $sreId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - - -# Look for resources in this subscription -# --------------------------------------- -$sreResourceGroups = Get-SreResourceGroups -sreConfig $config -$sreResourceGroupNames = $sreResourceGroups | ForEach-Object { $_.ResourceGroupName } -$sreResources = $sreResourceGroupNames ? (Get-AzResource | Where-Object { $sreResourceGroupNames.Contains($_.ResourceGroupName) }) : @() - -# If resources are found then print a warning message -if ($sreResources -or $sreResourceGroups) { - Add-LogMessage -Level Warning "SRE data should not be deleted from the SHM unless all SRE resources have been deleted from the subscription!" - Add-LogMessage -Level Warning "There are still $($sreResourceGroups.Length) undeleted resource group(s) remaining!" - $sreResourceGroups | ForEach-Object { Add-LogMessage -Level Warning "$($_.ResourceGroupName)" } - Add-LogMessage -Level Warning "There are still $($sreResources.Length) undeleted resource(s) remaining!" - $sreResources | ForEach-Object { Add-LogMessage -Level Warning "... $($_.Name) [$($_.ResourceType)]" } - $confirmation = Read-Host "Do you want to proceed with unregistering SRE $($config.sre.id) from SHM $($config.shm.id) (unsafe)? [y/n]" - while ($confirmation -ne "y") { - if ($confirmation -eq "n") { exit 0 } - $confirmation = Read-Host "Do you want to proceed with unregistering SRE $($config.sre.id) from SHM $($config.shm.id) (unsafe)? [y/n]" - } - -# ... otherwise continuing removing artifacts in the SHM subscription -} else { - $null = Set-AzContext -SubscriptionId $config.shm.subscriptionName -ErrorAction Stop - - # Remove SHM side of peerings involving this SRE - # ---------------------------------------------- - Add-LogMessage -Level Info "Removing peerings between SRE and SHM virtual networks..." - $peeringName = "PEER_$($config.sre.network.vnet.name)" - foreach ($shmVnet in $(Get-AzVirtualNetwork -Name * -ResourceGroupName $config.shm.network.vnet.rg)) { - foreach ($peering in $(Get-AzVirtualNetworkPeering -VirtualNetworkName $shmVnet.Name -ResourceGroupName $config.shm.network.vnet.rg | Where-Object { $_.Name -eq $peeringName })) { - $null = Remove-AzVirtualNetworkPeering -Name $peering.Name -VirtualNetworkName $shmVnet.Name -ResourceGroupName $config.shm.network.vnet.rg -Force - if ($?) { - Add-LogMessage -Level Success "Removal of peering '$($peering.Name)' succeeded" - } else { - Add-LogMessage -Level Fatal "Removal of peering '$($peering.Name)' failed!" 
- } - } - } - - - # Remove SRE users and groups from SHM DC - # --------------------------------------- - Add-LogMessage -Level Info "Removing SRE users and groups from SHM DC..." - # Load data to remove - $groupNames = $config.sre.domain.securityGroups.Values | ForEach-Object { $_.name } - $userNames = $config.sre.users.computerManagers.Values | ForEach-Object { $_.samAccountName } - $userNames += $config.sre.users.serviceAccounts.Values | ForEach-Object { $_.samAccountName } - $computerNamePatterns = @("*-$($config.sre.id)".ToUpper(), "*-$($config.sre.id)-*".ToUpper()) - # Remove SRE users and groups from SHM DC - $params = @{ - groupNamesB64 = $groupNames | ConvertTo-Json -Depth 99 | ConvertTo-Base64 - userNamesB64 = $userNames | ConvertTo-Json -Depth 99 | ConvertTo-Base64 - computerNamePatternsB64 = $computerNamePatterns | ConvertTo-Json -Depth 99 | ConvertTo-Base64 - } - $scriptPath = Join-Path $PSScriptRoot ".." "remote" "configure_shm_dc" "scripts" "Remove_Users_And_Groups_Remote.ps1" -Resolve - $null = Invoke-RemoteScript -Shell "PowerShell" -ScriptPath $scriptPath -VMName $config.shm.dc.vmName -ResourceGroupName $config.shm.dc.rg -Parameter $params - - - # Remove SRE DNS records and private endpoint DNS Zones from SHM DC - # ---------------------------------------------------------------- - Add-LogMessage -Level Info "Removing SRE private DNS records from SHM DC..." - $privateEndpointNames = @($config.sre.storage.persistentdata.account.name, $config.sre.storage.userdata.account.name) | - ForEach-Object { Get-AzStorageAccount -ResourceGroupName $config.shm.storage.persistentdata.rg -Name $_ -ErrorAction SilentlyContinue } | - Where-Object { $_ } | - ForEach-Object { $_.Context.Name } - $params = @{ - ShmFqdn = $config.shm.domain.fqdn - SreFqdn = $config.sre.domain.fqdn - SreId = $config.sre.id - PrivateEndpointFragmentsB64 = $privateEndpointNames | ConvertTo-Json -Depth 99 | ConvertTo-Base64 - } - $scriptPath = Join-Path $PSScriptRoot ".." "remote" "configure_shm_dc" "scripts" "Remove_DNS_Entries_Remote.ps1" -Resolve - $null = Invoke-RemoteScript -Shell "PowerShell" -ScriptPath $scriptPath -VMName $config.shm.dc.vmName -ResourceGroupName $config.shm.dc.rg -Parameter $params - - - # Remove SRE DNS Zone - # ------------------- - $null = Set-AzContext -SubscriptionId $config.shm.dns.subscriptionName -ErrorAction Stop - # Check whether the SHM and/or SRE zones exist on Azure - try { - $shmZone = Get-AzDnsZone -Name $config.shm.domain.fqdn -ResourceGroupName $config.shm.dns.rg -ErrorAction Stop - } catch [Microsoft.Rest.Azure.CloudException] { - Add-LogMessage -Level Info "Could not find DNS zone for SHM $($config.shm.id) domain ($($config.shm.domain.fqdn))." - $shmZone = $null - } - try { - $sreZone = Get-AzDnsZone -Name $config.sre.domain.fqdn -ResourceGroupName $config.shm.dns.rg -ErrorAction Stop - } catch [Microsoft.Rest.Azure.CloudException] { - Add-LogMessage -Level Info "Could not find DNS zone for SRE $($config.sre.id) domain ($($config.sre.domain.fqdn))." - $sreZone = $null - } - # If the parent SHM record exists on Azure then we can remove the SRE zone entirely - if ($shmZone) { - # Delete the SRE DNS zone - if ($sreZone) { - Add-LogMessage -Level Info "[ ] Removing $($config.sre.domain.fqdn) DNS Zone" - Remove-AzDnsZone -Name $config.sre.domain.fqdn -ResourceGroupName $config.shm.dns.rg -Confirm:$false - if ($?) { - Add-LogMessage -Level Success "Zone removal succeeded" - } else { - Add-LogMessage -Level Fatal "Zone removal failed!" 
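When the parent SHM zone is hosted in Azure, the branch above deletes the whole SRE zone and then drops its NS delegation from the parent. A compressed sketch of that path with hypothetical zone names and resource group:

Import-Module Az.Dns -ErrorAction Stop

$dnsRg      = "RG-SHM-DNS"
$parentZone = "shm.example.org"
$childZone  = "sandbox.shm.example.org"

# Get-AzDnsZone throws when the zone is missing, hence the try/catch
try { $null = Get-AzDnsZone -Name $childZone -ResourceGroupName $dnsRg -ErrorAction Stop; $childExists = $true }
catch { $childExists = $false }

if ($childExists) {
    Remove-AzDnsZone -Name $childZone -ResourceGroupName $dnsRg -Confirm:$false
}
# The delegation record in the parent zone is named after the first label of the child zone
$subdomain = $childZone.Split(".")[0]
Remove-AzDnsRecordSet -Name $subdomain -RecordType NS -ZoneName $parentZone -ResourceGroupName $dnsRg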
- } - } - # Remove the SRE NS record - $subdomain = $($config.sre.domain.fqdn).Split(".")[0] - Add-LogMessage -Level Info "[ ] Removing '$subdomain' NS record from SHM $($config.shm.id) DNS zone ($($config.shm.domain.fqdn))" - Remove-AzDnsRecordSet -Name $subdomain -RecordType NS -ZoneName $config.shm.domain.fqdn -ResourceGroupName $config.shm.dns.rg - if ($?) { - Add-LogMessage -Level Success "Record removal succeeded" - } else { - Add-LogMessage -Level Fatal "Record removal failed!" - } - # Otherwise we assume that the source of the SRE DNS record is outside Azure and only remove the SRE-specific records - } else { - if ($sreZone) { - # Remove SRE FQDN A record - Add-LogMessage -Level Info "[ ] Removing '@' A record from SRE $($config.sre.id) DNS zone ($($config.sre.domain.fqdn))" - Remove-AzDnsRecordSet -Name "@" -RecordType A -ZoneName $config.sre.domain.fqdn -ResourceGroupName $config.shm.dns.rg - $success = $? - # Remote desktop server CNAME record - if ($config.sre.remoteDesktop.provider -eq "ApacheGuacamole") { - $serverHostname = "$($config.sre.remoteDesktop.guacamole.hostname)".ToLower() - } else { - Add-LogMessage -Level Fatal "Remote desktop type '$($config.sre.remoteDesktop.type)' was not recognised!" - } - if ($serverHostname) { - Add-LogMessage -Level Info "[ ] Removing '$serverHostname' CNAME record from SRE $($config.sre.id) DNS zone ($($config.sre.domain.fqdn))" - Remove-AzDnsRecordSet -Name $serverHostname -RecordType CNAME -ZoneName $config.sre.domain.fqdn -ResourceGroupName $config.shm.dns.rg - $success = $success -and $? - # Let's Encrypt ACME records - foreach ($letsEncryptAcmeDnsRecord in ("_acme-challenge.${serverHostname}".ToLower(), "_acme-challenge.$($config.sre.domain.fqdn)".ToLower(), "_acme-challenge")) { - Add-LogMessage -Level Info "[ ] Removing '$letsEncryptAcmeDnsRecord' TXT record from SRE $($config.sre.id) DNS zone ($($config.sre.domain.fqdn))" - Remove-AzDnsRecordSet -Name $letsEncryptAcmeDnsRecord -RecordType TXT -ZoneName $config.sre.domain.fqdn -ResourceGroupName $config.shm.dns.rg - $success = $success -and $? - } - # Print success/failure message - if ($success) { - Add-LogMessage -Level Success "Record removal succeeded" - } else { - Add-LogMessage -Level Fatal "Record removal failed!" - } - } - } - } -} - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/secure_research_environment/setup/Run_SRE_SRD_Remote_Diagnostics.ps1 b/deployment/secure_research_environment/setup/Run_SRE_SRD_Remote_Diagnostics.ps1 deleted file mode 100644 index 9c6854be36..0000000000 --- a/deployment/secure_research_environment/setup/Run_SRE_SRD_Remote_Diagnostics.ps1 +++ /dev/null @@ -1,65 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $true, HelpMessage = "Enter SRE ID (e.g. 'sandbox')")] - [string]$sreId, - [Parameter(Mandatory = $true, ParameterSetName = "ByIPAddress", HelpMessage = "Last octet of IP address eg. 
'160'")] - [string]$ipLastOctet -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.Compute -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureCompute -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/DataStructures -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-SreConfig -shmId $shmId -sreId $sreId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - - -# Use the IP last octet to get the VM name -# ---------------------------------------- -$vmNamePrefix = "SRE-$($config.sre.id)-${ipLastOctet}-SRD".ToUpper() -$vmName = (Get-AzVM | Where-Object { $_.Name -match "$vmNamePrefix-\d{1,2}-\d{1,2}-\d{10}" }).Name -if (-not $vmName) { - Add-LogMessage -Level Fatal "Could not find a VM with last IP octet equal to '$ipLastOctet'" -} - - -# Run remote diagnostic scripts -# ----------------------------- -Add-LogMessage -Level Info "Running diagnostic scripts on VM $vmName..." -$params = @{ - DOMAIN_CONTROLLER = $config.shm.dc.fqdn - DOMAIN_JOIN_OU = "'$($config.shm.domain.ous.linuxServers.path)'" - DOMAIN_JOIN_USER = $config.shm.users.computerManagers.linuxServers.samAccountName - DOMAIN_LOWER = $config.shm.domain.fqdn - LDAP_SEARCH_USER = $config.sre.users.serviceAccounts.ldapSearch.samAccountName - LDAP_TEST_USER = $config.shm.users.serviceAccounts.aadLocalSync.samAccountName - SERVICE_PATH = "'$($config.shm.domain.ous.serviceAccounts.path)'" -} -foreach ($scriptNamePair in (("LDAP connection", "check_ldap_connection.sh"), - ("name resolution", "restart_name_resolution_service.sh"), - ("realm join", "rerun_realm_join.sh"), - ("mounted drives", "check_drive_mounts.sh"), - ("SSSD service", "restart_sssd_service.sh"), - ("xrdp service", "restart_xrdp_service.sh"))) { - $name, $diagnostic_script = $scriptNamePair - Add-LogMessage -Level Info "[ ] Configuring $name ($diagnostic_script) on SRD '$vmName'" - $scriptPath = Join-Path $PSScriptRoot ".." "remote" "secure_research_desktop" "scripts" $diagnostic_script - $null = Invoke-RemoteScript -Shell "UnixShell" -ScriptPath $scriptPath -VMName $vmName -ResourceGroupName $config.sre.srd.rg -Parameter $params - if ($?) { - Add-LogMessage -Level Success "Configuring $name on $vmName was successful" - } else { - Add-LogMessage -Level Failure "Configuring $name on $vmName failed!" - } -} - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/secure_research_environment/setup/Setup_SRE_Backup.ps1 b/deployment/secure_research_environment/setup/Setup_SRE_Backup.ps1 deleted file mode 100644 index 8b729b6fc0..0000000000 --- a/deployment/secure_research_environment/setup/Setup_SRE_Backup.ps1 +++ /dev/null @@ -1,127 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $true, HelpMessage = "Enter SRE ID (e.g. 
'sandbox')")] - [string]$sreId -) - -Import-Module $PSScriptRoot/../../common/AzureDataProtection -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureResources -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-SreConfig -shmId $shmId -sreId $sreId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - - -# Deploy backup resource group -# ---------------------------- -$null = Deploy-ResourceGroup -Name $config.sre.backup.rg -Location $config.shm.location - - -# Register DataProtection provider -# -------------------------------- -$RegistrationState = (Get-AzResourceProvider | Where-Object { $_.ProviderNamespace -eq “Microsoft.DataProtection” }).RegistrationState -while ($RegistrationState -ne "Registered") { - if ($RegistrationState -ne "Registering") { - $null = Register-AzResourceProvider -ProviderNamespace Microsoft.DataProtection - } else { - Start-Sleep 30 - } -} - - -# Deploy data protection backup vault -# ----------------------------------- -$Vault = Deploy-DataProtectionBackupVault -ResourceGroupName $config.sre.backup.rg ` - -VaultName $config.sre.backup.vault.name ` - -Location $config.sre.location - - -# Create blob backup policy -# This enforces the default policy for blobs -# ------------------------------------------ -$Policy = Deploy-DataProtectionBackupPolicy -ResourceGroupName $config.sre.backup.rg ` - -VaultName $config.sre.backup.vault.name ` - -PolicyName $config.sre.backup.blob.policy_name ` - -DataSourceType 'blob' - - -# Get persistent storage account in the SHM -# ----------------------------------------- -$null = Set-AzContext -SubscriptionId $config.shm.subscriptionName -ErrorAction Stop -$PersistentStorageAccount = Get-AzStorageAccount -ResourceGroupName $config.shm.storage.persistentdata.rg -Name $config.sre.storage.persistentdata.account.name -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - - -# Assign permissions required for blob backup to the Vault's managed identity -# --------------------------------------------------------------------------- -$null = Deploy-RoleAssignment -ObjectId $Vault.IdentityPrincipalId ` - -Scope $PersistentStorageAccount.id ` - -RoleDefinitionName "Storage Account Backup Contributor" - - -# Create blob backup instance -# --------------------------- -$null = Deploy-DataProtectionBackupInstance -BackupPolicyId $Policy.Id ` - -ResourceGroupName $config.sre.backup.rg ` - -VaultName $Vault.Name ` - -DataSourceType 'blob' ` - -DataSourceId $PersistentStorageAccount.Id ` - -DataSourceLocation $PersistentStorageAccount.PrimaryLocation ` - -DataSourceName $PersistentStorageAccount.StorageAccountName - - -# Create disk backup policy -# This enforces the default policy for disks -# ------------------------------------------ -$Policy = Deploy-DataProtectionBackupPolicy -ResourceGroupName $config.sre.backup.rg ` - -VaultName $config.sre.backup.vault.name ` - -PolicyName $config.sre.backup.disk.policy_name ` - -DataSourceType 'disk' - - -# Assign permissions required for disk backup -# Permission to create snapshots in backup resource group -# ------------------------------------------------------- -$null = Deploy-RoleAssignment -ObjectId 
$Vault.IdentityPrincipalId ` - -ResourceGroupName $config.sre.backup.rg ` - -RoleDefinitionName "Disk Snapshot Contributor" -$selected_rgs = @( - $config.sre.databases.rg - $config.sre.webapps.rg -) -foreach ($rg in $selected_rgs) { - # Permission to create snapshots from disks in relevant resource groups - $null = Deploy-RoleAssignment -ObjectId $Vault.IdentityPrincipalId ` - -ResourceGroupName $rg ` - -RoleDefinitionName "Disk Backup Reader" - - # Permission to create new disks (restore points) in relevant resource groups - $null = Deploy-RoleAssignment -ObjectId $Vault.IdentityPrincipalId ` - -ResourceGroupName $rg ` - -RoleDefinitionName "Disk Restore Operator" -} -Start-Sleep 30 # Allow time for role assignments to persist - - -# Create backup instances for all disks in selected resource groups -# ----------------------------------------------------------------- -$selected_disks = Get-AzDisk | Where-Object { $_.ResourceGroupName -in $selected_rgs } | Where-Object { $_.Name -like "*DATA-DISK" } -foreach ($disk in $selected_disks) { - $null = Deploy-DataProtectionBackupInstance -BackupPolicyId $Policy.Id ` - -ResourceGroupName $config.sre.backup.rg ` - -VaultName $Vault.Name ` - -DataSourceType 'disk' ` - -DataSourceId $disk.Id ` - -DataSourceLocation $disk.Location ` - -DataSourceName $disk.Name -} - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/secure_research_environment/setup/Setup_SRE_DNS_Zone.ps1 b/deployment/secure_research_environment/setup/Setup_SRE_DNS_Zone.ps1 deleted file mode 100644 index 92fdb824e3..0000000000 --- a/deployment/secure_research_environment/setup/Setup_SRE_DNS_Zone.ps1 +++ /dev/null @@ -1,49 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $true, HelpMessage = "Enter SRE ID (e.g. 'sandbox')")] - [string]$sreId -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureCompute -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureDns -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-SreConfig -shmId $shmId -sreId $sreId -$originalContext = Get-AzContext - - -# Switch to SHM subscription -# -------------------------- -$null = Set-AzContext -Subscription $config.shm.subscriptionName -ErrorAction Stop - - -# Add SRE DNS zone to SHM -# ----------------------- -Add-LogMessage -Level Info "[ ] Adding SRE DNS zone to internal SHM DNS server..." - -$params = @{ - SreFqdn = $config.sre.domain.fqdn -} -$scriptPath = Join-Path $PSScriptRoot ".." 
"remote" "configure_shm_dc" "scripts" "Create_DNS_Zone_Remote.ps1" -$null = Invoke-RemoteScript -Shell "PowerShell" -ScriptPath $scriptPath -VMName $config.shm.dc.vmName -ResourceGroupName $config.shm.dc.rg -Parameter $params - - -# Switch to domains subscription -# ------------------------------ -$null = Set-AzContext -Subscription $config.shm.dns.subscriptionName -ErrorAction Stop - - -# Create the DNS Zone and set the parent NS records if required -# ------------------------------------------------------------- -Set-DnsZoneAndParentNSRecords -DnsZoneName $config.sre.domain.fqdn -ResourceGroupName $config.shm.dns.rg - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/secure_research_environment/setup/Setup_SRE_Databases.ps1 b/deployment/secure_research_environment/setup/Setup_SRE_Databases.ps1 deleted file mode 100644 index 1ea687c358..0000000000 --- a/deployment/secure_research_environment/setup/Setup_SRE_Databases.ps1 +++ /dev/null @@ -1,223 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $true, HelpMessage = "Enter SRE ID (e.g. 'sandbox')")] - [string]$sreId, - [Parameter(Mandatory = $false, HelpMessage = "Force an existing database VM to be redeployed.")] - [switch]$Redeploy -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.Compute -ErrorAction Stop -Import-Module Az.Network -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureCompute -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureKeyVault -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureNetwork -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureResources -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureStorage -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Cryptography -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/DataStructures -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/RemoteCommands -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Templates -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-SreConfig -shmId $shmId -sreId $sreId -$originalContext = Get-AzContext -$null = Set-AzContext -Subscription $config.sre.subscriptionName -ErrorAction Stop - - -# Create database resource group if it does not exist -# --------------------------------------------------- -$null = Deploy-ResourceGroup -Name $config.sre.databases.rg -Location $config.sre.location - - -# Ensure that VNet and deployment subnet exist -# -------------------------------------------- -Add-LogMessage -Level Info "Retrieving virtual network '$($config.sre.network.vnet.name)'..." 
-$virtualNetwork = Get-AzVirtualNetwork -Name $config.sre.network.vnet.name -ResourceGroupName $config.sre.network.vnet.rg -ErrorAction Stop -$deploymentSubnet = Get-Subnet -Name $config.sre.network.vnet.subnets.deployment.name -VirtualNetworkName $virtualNetwork.Name -ResourceGroupName $config.sre.network.vnet.rg - - -# Create each database defined in the config file -# ----------------------------------------------- -foreach ($databaseCfg in $config.sre.databases.instances) { - # Check whether this database VM has already been deployed - # -------------------------------------------------------- - if (Get-AzVM -Name $databaseCfg.vmName -ResourceGroupName $config.sre.databases.rg -ErrorAction SilentlyContinue) { - if ($Redeploy) { - Add-LogMessage -Level Info "Removing existing database VM '$($databaseCfg.vmName)'..." - $null = Remove-VirtualMachine -Name $databaseCfg.vmName -ResourceGroupName $config.sre.databases.rg -Force - if ($?) { - Add-LogMessage -Level Success "Removal of database VM '$($databaseCfg.vmName)' succeeded" - } else { - Add-LogMessage -Level Fatal "Removal of database VM '$($databaseCfg.vmName)' failed!" - } - } else { - Add-LogMessage -Level Warning "Database VM '$($databaseCfg.vmName)' already exists. Use the '-Redeploy' option if you want to remove the existing database and its data and deploy a new one." - } - } - - # Get database subnet and deployment IP address - # --------------------------------------------- - $subnetCfg = $config.sre.network.vnet.subnets[$databaseCfg.subnet] - $subnet = Deploy-Subnet -Name $subnetCfg.name -VirtualNetwork $virtualNetwork -AddressPrefix $subnetCfg.cidr - $deploymentIpAddress = Get-NextAvailableIpInRange -IpRangeCidr $config.sre.network.vnet.subnets.deployment.cidr -VirtualNetwork $virtualNetwork -Verbose - - # Retrieve domain join details from SHM Key Vault - # ----------------------------------------------- - $null = Set-AzContext -Subscription $config.shm.subscriptionName -ErrorAction Stop - Add-LogMessage -Level Info "Creating/retrieving secrets from Key Vault '$($config.shm.keyVault.name)'..." - $domainJoinPassword = Resolve-KeyVaultSecret -VaultName $config.shm.keyVault.name -SecretName $config.shm.users.computerManagers.databaseServers.passwordSecretName -DefaultLength 20 -AsPlaintext - $null = Set-AzContext -Subscription $config.sre.subscriptionName -ErrorAction Stop - - # Retrieve usernames/passwords from SRE Key Vault - # ----------------------------------------------- - Add-LogMessage -Level Info "Creating/retrieving secrets from Key Vault '$($config.sre.keyVault.name)'..." - $dbAdminUsername = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $databaseCfg.dbAdminUsernameSecretName -AsPlaintext - $dbAdminPassword = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $databaseCfg.dbAdminPasswordSecretName -DefaultLength 20 -AsPlaintext - $vmAdminUsername = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $config.sre.keyVault.secretNames.adminUsername -DefaultValue "sre$($config.sre.id)admin".ToLower() -AsPlaintext - $vmAdminPasswordSecure = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $databaseCfg.adminPasswordSecretName -DefaultLength 20 - - # Deploy an SQL server - # -------------------- - if ($databaseCfg.type -eq "MSSQL") { - # Create SQL server from template - Add-LogMessage -Level Info "Preparing to create SQL database $($databaseCfg.vmName) from template..." 
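Resolve-KeyVaultSecret is another project helper (return the secret, or create one with a random value of -DefaultLength when it does not exist). A minimal get-or-create equivalent with stock Az.KeyVault cmdlets might look like this; the vault and secret names are placeholders and the password generator is illustrative only, not the project's cryptography module:

Import-Module Az.KeyVault -ErrorAction Stop

$vaultName  = "kv-sre-sandbox"
$secretName = "sre-sandbox-db-admin-password"

$secret = Get-AzKeyVaultSecret -VaultName $vaultName -Name $secretName
if (-not $secret) {
    # Simple 20-character random value for illustration; real deployments should use a vetted generator
    $alphabet    = [char[]]"abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789"
    $newPassword = -join (1..20 | ForEach-Object { $alphabet | Get-Random })
    $secureValue = ConvertTo-SecureString $newPassword -AsPlainText -Force
    $secret      = Set-AzKeyVaultSecret -VaultName $vaultName -Name $secretName -SecretValue $secureValue
}
# -AsPlainText needs a recent Az.KeyVault; otherwise convert the SecretValue manually
$plaintext = Get-AzKeyVaultSecret -VaultName $vaultName -Name $secretName -AsPlainText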
- $params = @{ - administratorPassword = $vmAdminPasswordSecure - administratorUsername = $vmAdminUsername - bootDiagnosticsAccountName = $config.sre.storage.bootdiagnostics.accountName - privateIpAddress = $deploymentIpAddress - sqlDbAdministratorPassword = $dbAdminPassword # NB. This has to be in plaintext for the deployment to work correctly - sqlDbAdministratorUsername = $dbAdminUsername - sqlServerConnectionPort = $databaseCfg.port - sqlServerEdition = $databaseCfg.sku - sqlServerName = $databaseCfg.vmName - virtualNetworkName = $virtualNetwork.Name - virtualNetworkResourceGroupName = $config.sre.network.vnet.rg - virtualNetworkSubnetName = $config.sre.network.vnet.subnets.deployment.name - vmDataDiskSizeGb = $databaseCfg.disks.data.sizeGb - vmDataDiskType = $databaseCfg.disks.data.type - vmOsDiskSizeGb = $databaseCfg.disks.os.sizeGb - vmOsDiskType = $databaseCfg.disks.os.type - vmSize = $databaseCfg.vmSize - } - Deploy-ArmTemplate -TemplatePath (Join-Path $PSScriptRoot ".." "arm_templates" "sre-mssql2019-server-template.json") -TemplateParameters $params -ResourceGroupName $config.sre.databases.rg - - # Set locale, install updates and reboot - Add-LogMessage -Level Info "Updating $($databaseCfg.vmName)..." - Invoke-WindowsConfiguration -VMName $databaseCfg.vmName -ResourceGroupName $config.sre.databases.rg -TimeZone $config.sre.time.timezone.windows -NtpServer ($config.shm.time.ntp.serverAddresses)[0] -AdditionalPowershellModules @("SqlServer") - - # Change subnets and IP address while the VM is off - Update-VMIpAddress -Name $databaseCfg.vmName -ResourceGroupName $config.sre.databases.rg -Subnet $subnet -IpAddress $databaseCfg.ip - - # Join the VM to the domain and restart it - Add-WindowsVMtoDomain -Name $databaseCfg.vmName ` - -ResourceGroupName $config.sre.databases.rg ` - -DomainName $config.shm.domain.fqdn ` - -DomainJoinUsername $config.shm.users.computerManagers.databaseServers.samAccountName ` - -DomainJoinPassword (ConvertTo-SecureString $domainJoinPassword -AsPlainText -Force) ` - -OUPath $config.shm.domain.ous.databaseServers.path ` - -ForceRestart - - # Lockdown SQL server - Add-LogMessage -Level Info "[ ] Locking down $($databaseCfg.vmName)..." - $serverLockdownCommandPath = (Join-Path $PSScriptRoot ".." "remote" "create_databases" "scripts" "sre-mssql2019-server-lockdown.sql") - $params = @{ - DataAdminGroup = "$($config.shm.domain.netbiosName)\$($config.sre.domain.securityGroups.dataAdministrators.name)" - DbAdminPasswordB64 = $dbAdminPassword | ConvertTo-Base64 - DbAdminUsername = $dbAdminUsername - EnableSSIS = [string]($databaseCfg.enableSSIS) - ResearchUsersGroup = "$($config.shm.domain.netbiosName)\$($config.sre.domain.securityGroups.researchUsers.name)" - ServerLockdownCommandB64 = Get-Content $serverLockdownCommandPath -Raw | ConvertTo-Base64 - SysAdminGroup = "$($config.shm.domain.netbiosName)\$($config.sre.domain.securityGroups.systemAdministrators.name)" - VmAdminUsername = $vmAdminUsername - } - $scriptPath = Join-Path $PSScriptRoot ".." "remote" "create_databases" "scripts" "Lockdown_Sql_Server.ps1" - $null = Invoke-RemoteScript -Shell "PowerShell" -ScriptPath $scriptPath -VMName $databaseCfg.vmName -ResourceGroupName $config.sre.databases.rg -Parameter $params - - # Deploy a PostgreSQL server - # -------------------------- - } elseif ($databaseCfg.type -eq "PostgreSQL") { - # Create PostgreSQL server from template - Add-LogMessage -Level Info "Preparing to create PostgreSQL database $($databaseCfg.vmName)..." 
- - # Retrieve secrets from Key Vaults - Add-LogMessage -Level Info "Creating/retrieving secrets from Key Vault '$($config.sre.keyVault.name)'..." - $dbServiceAccountName = $config.sre.users.serviceAccounts.postgres.name - $dbServiceAccountSamAccountName = $config.sre.users.serviceAccounts.postgres.samAccountName - $dbServiceAccountPassword = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $config.sre.users.serviceAccounts.postgres.passwordSecretName -DefaultLength 20 -AsPlaintext - $ldapSearchPassword = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $config.sre.users.serviceAccounts.ldapSearch.passwordSecretName -DefaultLength 20 -AsPlaintext - - # Create an AD service principal - Add-LogMessage -Level Info "Register '$dbServiceAccountName' ($dbServiceAccountSamAccountName) as a service principal for the database..." - $null = Set-AzContext -Subscription $config.shm.subscriptionName -ErrorAction Stop - $params = @{ - Hostname = $databaseCfg.vmName - Name = $dbServiceAccountName - SamAccountName = $dbServiceAccountSamAccountName - ShmFqdn = $config.shm.domain.fqdn - } - $scriptPath = Join-Path $PSScriptRoot ".." "remote" "create_databases" "scripts" "Create_Postgres_Service_Principal.ps1" - $null = Invoke-RemoteScript -Shell "PowerShell" -ScriptPath $scriptPath -VMName $config.shm.dc.vmName -ResourceGroupName $config.shm.dc.rg -Parameter $params - $null = Set-AzContext -Subscription $config.sre.subscriptionName -ErrorAction Stop - - # Deploy NIC and data disk - $bootDiagnosticsAccount = Deploy-StorageAccount -Name $config.sre.storage.bootdiagnostics.accountName -ResourceGroupName $config.sre.storage.bootdiagnostics.rg -Location $config.sre.location - $networkCard = Deploy-NetworkInterface -Name "$($databaseCfg.vmName)-NIC" -ResourceGroupName $config.sre.databases.rg -Subnet $deploymentSubnet -PrivateIpAddress $deploymentIpAddress -Location $config.sre.location - $dataDisk = Deploy-ManagedDisk -Name "$($databaseCfg.vmName)-DATA-DISK" -SizeGB $databaseCfg.disks.data.sizeGb -Type $databaseCfg.disks.data.type -ResourceGroupName $config.sre.databases.rg -Location $config.sre.location - - # Construct the cloud-init file - Add-LogMessage -Level Info "Constructing cloud-init from template..." - $cloudInitTemplate = Get-Content $(Join-Path $PSScriptRoot ".." "cloud_init" "cloud-init-postgres.mustache.yaml" -Resolve) -Raw - - # Insert additional files into the cloud-init template - $cloudInitTemplate = Expand-CloudInitResources -Template $cloudInitTemplate -ResourcePath (Join-Path $PSScriptRoot ".." "cloud_init" "resources") - $cloudInitTemplate = Expand-CloudInitResources -Template $cloudInitTemplate -ResourcePath (Join-Path ".." ".." 
"common" "resources") - - # Expand placeholders in the cloud-init file - $config["postgres"] = @{ - dbAdminPassword = $dbAdminPassword - dbServiceAccountPassword = $dbServiceAccountPassword - domainJoinPassword = $domainJoinPassword - ldapGroupFilter = "(&(objectClass=group)(|(CN=SG $($config.sre.domain.netbiosName) *)(CN=$($config.shm.domain.securityGroups.serverAdmins.name))))" # Using ' *' removes the risk of synchronising groups from an SRE with an overlapping name - ldapPostgresServiceAccountDn = "CN=${dbServiceAccountName},$($config.shm.domain.ous.serviceAccounts.path)" - ldapSearchUserDn = "CN=$($config.sre.users.serviceAccounts.ldapSearch.name),$($config.shm.domain.ous.serviceAccounts.path)" - ldapSearchUserPassword = $ldapSearchPassword - ldapUserFilter = "(&(objectClass=user)(|(memberOf=CN=$($config.sre.domain.securityGroups.researchUsers.name),$($config.shm.domain.ous.securityGroups.path))(memberOf=CN=$($config.shm.domain.securityGroups.serverAdmins.name),$($config.shm.domain.ous.securityGroups.path))))" - vmName = $databaseCfg.vmName - } - $cloudInitTemplate = Expand-MustacheTemplate -Template $cloudInitTemplate -Parameters $config - - # Deploy the VM - $params = @{ - AdminPassword = $vmAdminPasswordSecure - AdminUsername = $vmAdminUsername - BootDiagnosticsAccount = $bootDiagnosticsAccount - CloudInitYaml = $cloudInitTemplate - DataDiskIds = @($dataDisk.Id) - ImageSku = $databaseCfg.sku - Location = $config.sre.location - Name = $databaseCfg.vmName - NicId = $networkCard.Id - OsDiskType = $databaseCfg.disks.os.type - ResourceGroupName = $config.sre.databases.rg - Size = $databaseCfg.vmSize - } - $null = Deploy-LinuxVirtualMachine @params - - # Change subnets and IP address while the VM is off - note that the domain join will happen on restart - Update-VMIpAddress -Name $databaseCfg.vmName -ResourceGroupName $config.sre.databases.rg -Subnet $subnet -IpAddress $databaseCfg.ip - # Update DNS records for this VM - Update-VMDnsRecords -DcName $config.shm.dc.vmName -DcResourceGroupName $config.shm.dc.rg -BaseFqdn $config.shm.domain.fqdn -ShmSubscriptionName $config.shm.subscriptionName -VmHostname $databaseCfg.vmName -VmIpAddress $databaseCfg.ip - } -} - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/secure_research_environment/setup/Setup_SRE_Firewall.ps1 b/deployment/secure_research_environment/setup/Setup_SRE_Firewall.ps1 deleted file mode 100644 index 26e55ca08a..0000000000 --- a/deployment/secure_research_environment/setup/Setup_SRE_Firewall.ps1 +++ /dev/null @@ -1,132 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $true, HelpMessage = "Enter SRE ID (e.g. 
'sandbox')")] - [string]$sreId -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.Network -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureNetwork -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-SreConfig -shmId $shmId -sreId $sreId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - - -# Load the SRE VNet -# ----------------- -$virtualNetwork = Get-AzVirtualNetwork -Name $config.sre.network.vnet.name -ResourceGroupName $config.sre.network.vnet.rg - - -# Load the SHM firewall and ensure it is started (it can be deallocated to save costs or if credit has run out) -# ------------------------------------------------------------------------------------------------------------- -$null = Set-AzContext -SubscriptionId $config.shm.subscriptionName -ErrorAction Stop -$firewall = Start-Firewall -Name $config.shm.firewall.name -ResourceGroupName $config.shm.network.vnet.rg -VirtualNetworkName $config.shm.network.vnet.name -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - - -# Deploy a route table for this SRE -# Note that the route table must be in the same subscription as any subnets attached to it so we cannot use the one from the SHM -# ------------------------------------------------------------------------------------------------------------------------------ -$routeTable = Deploy-RouteTable -Name $config.sre.firewall.routeTableName -ResourceGroupName $config.sre.network.vnet.rg -Location $config.sre.location - - -# Load all traffic rules from template -# ------------------------------------ -$config.shm.firewall["privateIpAddress"] = $firewall.IpConfigurations.PrivateIpAddress -$rules = Get-JsonFromMustacheTemplate -TemplatePath (Join-Path $PSScriptRoot ".." "network_rules" "sre-firewall-rules.json") -Parameters $config -AsHashtable -$ruleNameFilter = "sre-$($config.sre.id)*" - - -# Add routes to the route table -# We need to keep all routing symmetric, or it will be dropped by the firewall (see eg. https://azure.microsoft.com/en-gb/blog/accessing-virtual-machines-behind-azure-firewall-with-azure-bastion/). -# VPN gateway and Remote Desktop connections do not come via the firewall so they must return by the same route. -# All other requests should be routed via the firewall. -# Rules are applied by looking for the closest CIDR match first, so the general rule from 0.0.0.0/0 will always come last. 
-# ------------------------------------------------------------------------------------------------------------------------ -foreach ($route in $rules.routes) { - $null = Deploy-Route -Name $route.name -RouteTableName $config.sre.firewall.routeTableName -AppliesTo $route.properties.addressPrefix -NextHop $route.properties.nextHop -} - - -# Attach all non-excluded subnets to the route table that will send traffic through the firewall -# ---------------------------------------------------------------------------------------------- -# The RDG and deployment subnets always have internet access -$excludedSubnetNames = @($config.sre.network.vnet.subnets.remoteDesktop.name, $config.sre.network.vnet.subnets.deployment.name) -# The compute subnet will have internet access according to what is in the config file (eg. for Tier 0 and Tier 1) -if ($config.sre.remoteDesktop.networkRules.outboundInternet -eq "Allow") { - $excludedSubnetNames += $config.sre.network.vnet.subnets.compute.name -} -# Attach all remaining subnets to the route table -foreach ($subnet in $VirtualNetwork.Subnets) { - if ($excludedSubnetNames.Contains($subnet.Name)) { - Add-LogMessage -Level Info "Ensuring that $($subnet.Name) is NOT attached to any route table..." - $VirtualNetwork = Set-AzVirtualNetworkSubnetConfig -VirtualNetwork $VirtualNetwork -Name $subnet.Name -AddressPrefix $subnet.AddressPrefix -RouteTable $null | Set-AzVirtualNetwork - } else { - Add-LogMessage -Level Info "Ensuring that $($subnet.Name) is attached to $($routeTable.Name)..." - $VirtualNetwork = Set-AzVirtualNetworkSubnetConfig -VirtualNetwork $VirtualNetwork -Name $subnet.Name -AddressPrefix $subnet.AddressPrefix -RouteTable $routeTable | Set-AzVirtualNetwork - } -} - - -# Set firewall rules from template -# -------------------------------- -$null = Set-AzContext -SubscriptionId $config.shm.subscriptionName -ErrorAction Stop -# Application rules -foreach ($ruleCollectionName in $firewall.ApplicationRuleCollections | Where-Object { $_.Name -like "$ruleNameFilter*" } | ForEach-Object { $_.Name }) { - $null = $firewall.RemoveApplicationRuleCollectionByName($ruleCollectionName) - Add-LogMessage -Level Info "Removed existing '$ruleCollectionName' application rule collection." -} -if ($rules.applicationRuleCollections) { - foreach ($ruleCollection in $rules.applicationRuleCollections) { - Add-LogMessage -Level Info "Setting rules for application rule collection '$($ruleCollection.name)'..." - foreach ($rule in $ruleCollection.properties.rules) { - $params = @{} - if ($rule.fqdnTags) { $params["TargetTag"] = $rule.fqdnTags } - if ($rule.protocols) { $params["Protocol"] = $rule.protocols } - if ($rule.targetFqdns) { $params["TargetFqdn"] = $rule.targetFqdns } - $firewall = Deploy-FirewallApplicationRule -Name $rule.name -CollectionName $ruleCollection.name -Firewall $firewall -SourceAddress $rule.sourceAddresses -Priority $ruleCollection.properties.priority -ActionType $ruleCollection.properties.action.type @params -LocalChangeOnly - } - } -} else { - Add-LogMessage -Level Warning "No application rules specified." -} -# Network rules -foreach ($ruleCollectionName in $firewall.NetworkRuleCollections | Where-Object { $_.Name -like "$ruleNameFilter*" } | ForEach-Object { $_.Name }) { - $null = $firewall.RemoveNetworkRuleCollectionByName($ruleCollectionName) - Add-LogMessage -Level Info "Removed existing '$ruleCollectionName' network rule collection." 
-} -if ($rules.networkRuleCollections) { - foreach ($ruleCollection in $rules.networkRuleCollections) { - Add-LogMessage -Level Info "Setting rules for network rule collection '$($ruleCollection.name)'..." - foreach ($rule in $ruleCollection.properties.rules) { - $null = Deploy-FirewallNetworkRule -Name $rule.name -CollectionName $ruleCollection.name -Firewall $firewall -SourceAddress $rule.sourceAddresses -DestinationAddress $rule.destinationAddresses -DestinationPort $rule.destinationPorts -Protocol $rule.protocols -Priority $ruleCollection.properties.priority -ActionType $ruleCollection.properties.action.type -LocalChangeOnly - } - } -} else { - Add-LogMessage -Level Warning "No network rules specified." -} -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - - -# Update remote firewall with rule changes -# ---------------------------------------- -Add-LogMessage -Level Info "[ ] Updating remote firewall with rule changes..." -try { - $null = Set-AzContext -SubscriptionId $config.shm.subscriptionName -ErrorAction Stop - $null = Set-AzFirewall -AzureFirewall $firewall -ErrorAction Stop - $null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - Add-LogMessage -Level Success "Updated remote firewall with rule changes." -} catch { - Add-LogMessage -Level Fatal "Failed to update remote firewall with rule changes!" -Exception $_.Exception -} - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/secure_research_environment/setup/Setup_SRE_Guacamole_Servers.ps1 b/deployment/secure_research_environment/setup/Setup_SRE_Guacamole_Servers.ps1 deleted file mode 100644 index ba6cacdf84..0000000000 --- a/deployment/secure_research_environment/setup/Setup_SRE_Guacamole_Servers.ps1 +++ /dev/null @@ -1,163 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $true, HelpMessage = "Enter SRE ID (e.g. 
'sandbox')")] - [string]$sreId -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.Dns -ErrorAction Stop -Import-Module Az.Network -ErrorAction Stop -Import-Module Microsoft.Graph.Authentication -ErrorAction Stop -Import-Module Microsoft.Graph.Applications -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureCompute -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureDns -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureKeyVault -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureNetwork -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureResources -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureStorage -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Cryptography -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Templates -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-SreConfig -shmId $shmId -sreId $sreId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - - -# Check that we are using the correct provider -# -------------------------------------------- -if ($config.sre.remoteDesktop.provider -ne "ApacheGuacamole") { - Add-LogMessage -Level Fatal "You should not be running this script when using remote desktop provider '$($config.sre.remoteDesktop.provider)'" -} - - -# Retrieve VNET and subnets -# ------------------------- -Add-LogMessage -Level Info "Retrieving virtual network '$($config.sre.network.vnet.name)'..." 
-$vnet = Get-AzVirtualNetwork -Name $config.sre.network.vnet.name -ResourceGroupName $config.sre.network.vnet.rg -ErrorAction Stop -$guacamoleSubnet = Get-Subnet -Name $config.sre.network.vnet.subnets.remoteDesktop.name -VirtualNetworkName $vnet.Name -ResourceGroupName $config.sre.network.vnet.rg -$deploymentSubnet = Get-Subnet -Name $config.sre.network.vnet.subnets.deployment.name -VirtualNetworkName $vnet.Name -ResourceGroupName $config.sre.network.vnet.rg - - -# Get deployment IP address -# ------------------------- -$deploymentIpAddress = Get-NextAvailableIpInRange -IpRangeCidr $config.sre.network.vnet.subnets.deployment.cidr -VirtualNetwork $vnet - - -# Create remote desktop resource group if it does not exist -# --------------------------------------------------------- -$null = Deploy-ResourceGroup -Name $config.sre.remoteDesktop.rg -Location $config.sre.location - - -# Deploy a network card with a public IP address -# ---------------------------------------------- -$networkCard = Deploy-NetworkInterface -Name "$($config.sre.remoteDesktop.guacamole.vmName)-NIC" -ResourceGroupName $config.sre.remoteDesktop.rg -Subnet $deploymentSubnet -PrivateIpAddress $deploymentIpAddress -Location $config.sre.location -$publicIp = Deploy-PublicIpAddress -Name "$($config.sre.remoteDesktop.guacamole.vmName)-PIP" -ResourceGroupName $config.sre.remoteDesktop.rg -AllocationMethod Static -Location $config.sre.location -$null = $networkCard | Set-AzNetworkInterfaceIpConfig -Name $networkCard.ipConfigurations[0].Name -SubnetId $deploymentSubnet.Id -PublicIpAddressId $publicIp.Id | Set-AzNetworkInterface - - -# Register AzureAD application -# ---------------------------- -$azureAdApplicationName = "Guacamole SRE $($config.sre.id)" -Add-LogMessage -Level Info "Ensuring that '$azureAdApplicationName' is registered with Azure Active Directory..." -if (Get-MgContext) { - Add-LogMessage -Level Info "Already authenticated against Microsoft Graph" -} else { - Connect-MgGraph -TenantId $config.shm.azureAdTenantId -Scopes "Application.ReadWrite.All", "Policy.ReadWrite.ApplicationConfiguration" -ErrorAction Stop -ContextScope Process -} -try { - $application = Get-MgApplication -Filter "DisplayName eq '$azureAdApplicationName'" -ErrorAction Stop - if ($application) { - Add-LogMessage -Level InfoSuccess "'$azureAdApplicationName' is already registered in Azure Active Directory" - } else { - Add-LogMessage -Level Info "Registering '$azureAdApplicationName' with Azure Active Directory..." - $application = New-MgApplication -DisplayName "$azureAdApplicationName" -SignInAudience "AzureADMyOrg" -Web @{ RedirectUris = @("https://$($config.sre.domain.fqdn)"); ImplicitGrantSettings = @{ EnableIdTokenIssuance = $true } } -ErrorAction Stop - if ($application) { - Add-LogMessage -Level Success "Registered '$azureAdApplicationName' in Azure Active Directory" - } else { - Add-LogMessage -Level Fatal "Failed to register '$azureAdApplicationName' in Azure Active Directory!" - } - } -} catch { - Add-LogMessage -Level Fatal "Could not connect to Microsoft Graph!" -Exception $_.Exception -} - - -# Retrieve passwords from the Key Vault -# ------------------------------------- -Add-LogMessage -Level Info "Creating/retrieving secrets from Key Vault '$($config.sre.keyVault.name)'..." 
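# Resolve-KeyVaultSecret (used just below) is a repository helper. It appears to implement a
# get-or-create pattern; a hedged sketch with plain Az.KeyVault cmdlets follows (the vault name,
# secret name and password policy are placeholders, and -AsPlainText needs a recent Az.KeyVault):
$exampleSecret = Get-AzKeyVaultSecret -VaultName "kv-sre-example" -Name "example-admin-password" -AsPlainText
if (-not $exampleSecret) {
    # Generate a throwaway 20-character value; the real helper's generation rules are not shown here
    $charset = [char[]]"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
    $exampleSecret = -join (1..20 | ForEach-Object { $charset | Get-Random })
    $null = Set-AzKeyVaultSecret -VaultName "kv-sre-example" -Name "example-admin-password" -SecretValue (ConvertTo-SecureString $exampleSecret -AsPlainText -Force)
}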
-$vmAdminUsername = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.Name -SecretName $config.sre.keyVault.secretNames.adminUsername -DefaultValue "sre$($config.sre.id)admin".ToLower() -AsPlaintext -$vmAdminPassword = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.Name -SecretName $config.sre.remoteDesktop.guacamole.adminPasswordSecretName -DefaultLength 20 -$guacamoleDbPassword = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.Name -SecretName $config.sre.remoteDesktop.guacamole.databaseAdminPasswordSecretName -DefaultLength 20 -AsPlaintext -$ldapSearchPassword = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $config.sre.users.serviceAccounts.ldapSearch.passwordSecretName -DefaultLength 20 -AsPlaintext - - -# Construct the cloud-init yaml file -# ---------------------------------- -Add-LogMessage -Level Info "Constructing cloud-init from template..." -$cloudInitTemplate = (Join-Path $PSScriptRoot ".." "cloud_init" "cloud-init-guacamole.mustache.yaml") | Get-Item | Get-Content -Raw -$cloudInitTemplate = Expand-CloudInitResources -Template $cloudInitTemplate -ResourcePath (Join-Path $PSScriptRoot ".." "cloud_init" "resources") -$cloudInitTemplate = Expand-CloudInitResources -Template $cloudInitTemplate -ResourcePath (Join-Path ".." ".." "common" "resources") -# Expand mustache template variables -$config["guacamole"] = @{ - applicationId = $application.AppId - disableCopy = ($config.sre.remoteDesktop.networkRules.copyAllowed ? 'false' : 'true') - disablePaste = ($config.sre.remoteDesktop.networkRules.pasteAllowed ? 'false' : 'true') - internalDbPassword = $guacamoleDbPassword - ipAddressFirstSRD = Get-NextAvailableIpInRange -IpRangeCidr $config.sre.network.vnet.subnets.compute.cidr -Offset 160 - ldapGroupFilter = "(&(objectClass=group)(CN=SG $($config.sre.domain.netbiosName)*))" - ldapSearchUserDn = "CN=$($config.sre.users.serviceAccounts.ldapSearch.name),$($config.shm.domain.ous.serviceAccounts.path)" - ldapSearchUserPassword = $ldapSearchPassword - ldapUserFilter = "(&(objectClass=user)(|(memberOf=CN=$($config.sre.domain.securityGroups.researchUsers.name),$($config.shm.domain.ous.securityGroups.path))(memberOf=CN=$($config.shm.domain.securityGroups.serverAdmins.name),$($config.shm.domain.ous.securityGroups.path))))" - sslCiphers = (Get-SslCipherSuites)["openssl"] | Join-String -Separator ":" - tenantId = $config.shm.azureAdTenantId -} -$cloudInitYaml = Expand-MustacheTemplate -Template $cloudInitTemplate -Parameters $config - - -# Deploy the VM -# ------------- -$null = Deploy-ResourceGroup -Name $config.sre.storage.bootdiagnostics.rg -Location $config.sre.location -$bootDiagnosticsAccount = Deploy-StorageAccount -Name $config.sre.storage.bootdiagnostics.accountName -ResourceGroupName $config.sre.storage.bootdiagnostics.rg -Location $config.sre.location -$params = @{ - AdminPassword = $vmAdminPassword - AdminUsername = $vmAdminUsername - BootDiagnosticsAccount = $bootDiagnosticsAccount - CloudInitYaml = $cloudInitYaml - ImageSku = "Ubuntu-latest" - Location = $config.sre.location - Name = $config.sre.remoteDesktop.guacamole.vmName - NicId = $networkCard.Id - OsDiskSizeGb = $config.sre.remoteDesktop.guacamole.disks.os.sizeGb - OsDiskType = $config.sre.remoteDesktop.guacamole.disks.os.type - ResourceGroupName = $config.sre.remoteDesktop.rg - Size = $config.sre.remoteDesktop.guacamole.vmSize -} -$null = Deploy-LinuxVirtualMachine @params - - -# Change subnets and IP address while the VM is off then restart -# 
-------------------------------------------------------------- -Update-VMIpAddress -Name $config.sre.remoteDesktop.guacamole.vmName -ResourceGroupName $config.sre.remoteDesktop.rg -Subnet $guacamoleSubnet -IpAddress $config.sre.remoteDesktop.guacamole.ip - - -# Add DNS records for Guacamole server -# ------------------------------------ -Deploy-DnsRecordCollection -PublicIpAddress $publicIp.IpAddress ` - -RecordNameA "@" ` - -RecordNameCAA "letsencrypt.org" ` - -RecordNameCName $serverHostname ` - -ResourceGroupName $config.shm.dns.rg ` - -SubscriptionName $config.shm.dns.subscriptionName ` - -TtlSeconds 30 ` - -ZoneName $config.sre.domain.fqdn - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/secure_research_environment/setup/Setup_SRE_Key_Vault_And_Users.ps1 b/deployment/secure_research_environment/setup/Setup_SRE_Key_Vault_And_Users.ps1 deleted file mode 100644 index c5d0faa455..0000000000 --- a/deployment/secure_research_environment/setup/Setup_SRE_Key_Vault_And_Users.ps1 +++ /dev/null @@ -1,119 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $true, HelpMessage = "Enter SRE ID (e.g. 'sandbox')")] - [string]$sreId -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.KeyVault -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureCompute -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureKeyVault -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureResources -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Cryptography -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/DataStructures -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-SreConfig -shmId $shmId -sreId $sreId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - - -# Create secrets resource group if it does not exist -# -------------------------------------------------- -$null = Deploy-ResourceGroup -Name $config.sre.keyVault.rg -Location $config.sre.location - - -# Ensure the Key Vault exists -# --------------------------- -$null = Deploy-KeyVault -Name $config.sre.keyVault.name -ResourceGroupName $config.sre.keyVault.rg -Location $config.sre.location -Set-KeyVaultPermissions -Name $config.sre.keyVault.name -GroupName $config.shm.azureAdminGroupName -Set-AzKeyVaultAccessPolicy -VaultName $config.sre.keyVault.name -ResourceGroupName $config.sre.keyVault.rg -EnabledForDeployment - - -# Ensure that secrets exist in the Key Vault -# ----------------------------------------- -Add-LogMessage -Level Info "Ensuring that secrets exist in Key Vault '$($config.sre.keyVault.name)'..." -# :: Admin usernames -try { - $null = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $config.sre.keyVault.secretNames.adminUsername -DefaultValue "sre$($config.sre.id)admin".ToLower() -AsPlaintext - Add-LogMessage -Level Success "Ensured that SRE admin usernames exist" -} catch { - Add-LogMessage -Level Fatal "Failed to ensure that SRE admin usernames exist!" 
-Exception $_.Exception -} -# :: VM admin passwords -try { - # Remote desktop - if ($config.sre.remoteDesktop.provider -eq "ApacheGuacamole") { - $null = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $config.sre.remoteDesktop.guacamole.adminPasswordSecretName -DefaultLength 20 -AsPlaintext - } else { - Add-LogMessage -Level Fatal "Remote desktop type '$($config.sre.remoteDesktop.type)' was not recognised!" - } - # Other VMs - $null = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $config.sre.srd.adminPasswordSecretName -DefaultLength 20 -AsPlaintext - $null = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $config.sre.webapps.codimd.adminPasswordSecretName -DefaultLength 20 -AsPlaintext - $null = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $config.sre.webapps.gitlab.adminPasswordSecretName -DefaultLength 20 -AsPlaintext - Add-LogMessage -Level Success "Ensured that SRE VM admin passwords exist" -} catch { - Add-LogMessage -Level Fatal "Failed to ensure that SRE VM admin passwords exist!" -Exception $_.Exception -} -# :: Databases -try { - foreach ($keyName in $config.sre.databases.Keys) { - foreach ($dbInstance in $config.sre.databases[$keyName]) { - if ($dbInstance -isnot [System.Collections.IDictionary]) { continue } - $dbAdminUsername = ($keyName -eq "dbpostgresql") ? "postgres" : "sre$($config.sre.id)dbadmin".ToLower() # The postgres admin username is hardcoded as 'postgres' but we save it to the keyvault to ensure a consistent record structure - $null = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $dbInstance.adminPasswordSecretName -DefaultLength 20 -AsPlaintext - $null = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $dbInstance.dbAdminUsernameSecretName $dbAdminUsername -AsPlaintext - $null = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $dbInstance.dbAdminPasswordSecretName -DefaultLength 20 -AsPlaintext - } - } - Add-LogMessage -Level Success "Ensured that SRE database secrets exist" -} catch { - Add-LogMessage -Level Fatal "Failed to ensure that SRE database secrets exist!" -Exception $_.Exception -} -# :: Other secrets -try { - $null = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $config.sre.webapps.gitlab.rootPasswordSecretName -DefaultLength 20 -AsPlaintext - $null = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $config.sre.keyVault.secretNames.npsSecret -DefaultLength 12 -AsPlaintext - Add-LogMessage -Level Success "Ensured that other SRE secrets exist" -} catch { - Add-LogMessage -Level Fatal "Failed to ensure that other SRE secrets exist!" -Exception $_.Exception -} - - -# Retrieve passwords from the Key Vault -# ------------------------------------- -Add-LogMessage -Level Info "Loading secrets for SRE users and groups..." -# Load SRE groups -$groups = $config.sre.domain.securityGroups -# Load SRE service users -$serviceUsers = $config.sre.users.serviceAccounts -foreach ($user in $serviceUsers.Keys) { - $serviceUsers[$user]["password"] = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $serviceUsers[$user]["passwordSecretName"] -DefaultLength 20 -AsPlaintext -} - - -# Add SRE users and groups to SHM -# ------------------------------- -Add-LogMessage -Level Info "[ ] Adding SRE users and groups to SHM..." 
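# ConvertTo-Base64 (used just below) is a repository helper rather than a built-in cmdlet; it is
# assumed to be equivalent to the standard .NET round-trip shown here with an illustrative payload:
$exampleJson    = @{ name = "Example security group" } | ConvertTo-Json -Depth 99
$exampleEncoded = [System.Convert]::ToBase64String([System.Text.Encoding]::UTF8.GetBytes($exampleJson))
# The remote script on the domain controller is then expected to decode it with the reverse call:
$exampleDecoded = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($exampleEncoded))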
-$null = Set-AzContext -Subscription $config.shm.subscriptionName -ErrorAction Stop -$params = @{ - ShmSystemAdministratorSgName = $config.shm.domain.securityGroups.serverAdmins.name - GroupsB64 = $groups | ConvertTo-Json -Depth 99 | ConvertTo-Base64 - ServiceUsersB64 = $serviceUsers | ConvertTo-Json -Depth 99 | ConvertTo-Base64 - SecurityOuPath = $config.shm.domain.ous.securityGroups.path - ServiceOuPath = $config.shm.domain.ous.serviceAccounts.path -} -$scriptPath = Join-Path $PSScriptRoot ".." "remote" "configure_shm_dc" "scripts" "Create_New_SRE_User_Service_Accounts_Remote.ps1" -$null = Invoke-RemoteScript -Shell "PowerShell" -ScriptPath $scriptPath -VMName $config.shm.dc.vmName -ResourceGroupName $config.shm.dc.rg -Parameter $params -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/secure_research_environment/setup/Setup_SRE_Monitoring.ps1 b/deployment/secure_research_environment/setup/Setup_SRE_Monitoring.ps1 deleted file mode 100644 index 5a14c854b3..0000000000 --- a/deployment/secure_research_environment/setup/Setup_SRE_Monitoring.ps1 +++ /dev/null @@ -1,121 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $true, HelpMessage = "Enter SRE ID (e.g. 'sandbox')")] - [string]$sreId -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.Compute -ErrorAction Stop -Import-Module Az.OperationalInsights -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureAutomation -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureCompute -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureNetwork -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureOperationalInsights -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzurePrivateDns -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop - - -# Get config and original context -# ------------------------------- -$config = Get-SreConfig -shmId $shmId -sreId $sreId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - - -# Load SRE virtual networks -# ------------------------- -$sreVirtualNetworks = Get-VirtualNetwork -ResourceGroupName $config.sre.network.vnet.rg - - -# Switch to SHM subscription and connect the private DNS zones to all virtual networks in the SRE -# Note that this must be done before connecting the VMs to log analytics to ensure that they use the private link -# --------------------------------------------------------------------------------------------------------------- -$null = Set-AzContext -SubscriptionId $config.shm.subscriptionName -ErrorAction Stop -foreach ($PrivateZone in (Get-AzPrivateDnsZone -ResourceGroupName $config.shm.network.vnet.rg)) { - foreach ($virtualNetwork in $sreVirtualNetworks) { - $null = Connect-PrivateDnsToVirtualNetwork -DnsZone $privateZone -VirtualNetwork $virtualNetwork - } -} -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - - -# Get log analytics workspace details -# ----------------------------------- -Add-LogMessage -Level Info "[ ] Getting log analytics workspace details..." 
-try { - $null = Set-AzContext -SubscriptionId $config.shm.subscriptionName -ErrorAction Stop - $workspace = Deploy-LogAnalyticsWorkspace -Name $config.shm.monitoring.loggingWorkspace.name -ResourceGroupName $config.shm.monitoring.rg -Location $config.sre.location - $workspaceKey = Get-AzOperationalInsightsWorkspaceSharedKey -Name $workspace.Name -ResourceGroup $workspace.ResourceGroupName - $null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - Add-LogMessage -Level InfoSuccess "Retrieved log analytics workspace '$($workspace.Name)." -} catch { - Add-LogMessage -Level Fatal "Failed to retrieve log analytics workspace!" -Exception $_.Exception -} - - -# Ensure logging agent is installed on all SRE VMs -# ------------------------------------------------ -Add-LogMessage -Level Info "[ ] Ensuring logging agent is installed on all SRE VMs..." -$sreResourceGroups = Get-SreResourceGroups -sreConfig $config -try { - $null = $sreResourceGroups | ForEach-Object { Get-AzVM -ResourceGroup $_.ResourceGroupName } | ForEach-Object { - Deploy-VirtualMachineMonitoringExtension -VM $_ -WorkspaceId $workspace.CustomerId -WorkspaceKey $workspaceKey.PrimarySharedKey - } - Add-LogMessage -Level Success "Ensured that logging agent is installed on all SRE VMs." -} catch { - Add-LogMessage -Level Fatal "Failed to ensure that logging agent is installed on all SRE VMs!" -Exception $_.Exception -} - - -# Schedule updates for all connected VMs -# Note that we need to be in the SHM subscription to do so -# -------------------------------------------------------- -$null = Set-AzContext -SubscriptionId $config.shm.subscriptionName -ErrorAction Stop -$account = Deploy-AutomationAccount -Name $config.shm.monitoring.automationAccount.name -ResourceGroupName $config.shm.monitoring.rg -Location $config.shm.location -$sreQuery = Deploy-AutomationAzureQuery -Account $account -ResourceGroups $sreResourceGroups -$localTimeZone = Get-TimeZone -Id $config.shm.time.timezone.linux -# Create Windows VM virus definitions update schedule -$windowsDailySchedule = Deploy-AutomationScheduleInDays -Account $account ` - -Name "sre-$($config.sre.id)-windows-definitions".ToLower() ` - -Time "$($config.shm.monitoring.updateServers.schedule.daily_definition_updates.hour):$($config.shm.monitoring.updateServers.schedule.daily_definition_updates.minute)" ` - -TimeZone $localTimeZone -$null = Register-VmsWithAutomationSchedule -Account $account ` - -DurationHours 1 ` - -IncludedUpdateCategories @("Definition") ` - -Query $sreQuery ` - -Schedule $windowsDailySchedule ` - -VmType "Windows" -# Create Windows VM other updates schedule -$windowsWeeklySchedule = Deploy-AutomationScheduleInDays -Account $account ` - -DayInterval 7 ` - -Name "sre-$($config.sre.id)-windows-updates".ToLower() ` - -StartDayOfWeek $config.shm.monitoring.updateServers.schedule.weekly_system_updates.day ` - -Time "$($config.shm.monitoring.updateServers.schedule.weekly_system_updates.hour):$($config.shm.monitoring.updateServers.schedule.weekly_system_updates.minute)" ` - -TimeZone $localTimeZone -$null = Register-VmsWithAutomationSchedule -Account $account ` - -DurationHours 3 ` - -IncludedUpdateCategories @("Critical", "FeaturePack", "Security", "ServicePack", "Tools", "Unclassified", "UpdateRollup", "Updates") ` - -Query $sreQuery ` - -Schedule $windowsWeeklySchedule ` - -VmType "Windows" -# Create Linux VM update schedule -$linuxWeeklySchedule = Deploy-AutomationScheduleInDays -Account $account ` - -DayInterval 7 ` - -Name 
"sre-$($config.sre.id)-linux-updates".ToLower() ` - -StartDayOfWeek $config.shm.monitoring.updateServers.schedule.weekly_system_updates.day ` - -Time "$($config.shm.monitoring.updateServers.schedule.weekly_system_updates.hour):$($config.shm.monitoring.updateServers.schedule.weekly_system_updates.minute)" ` - -TimeZone $localTimeZone -$null = Register-VmsWithAutomationSchedule -Account $account ` - -DurationHours 3 ` - -IncludedUpdateCategories @("Critical", "Other", "Security", "Unclassified") ` - -Query $sreQuery ` - -Schedule $linuxWeeklySchedule ` - -VmType "Linux" -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/secure_research_environment/setup/Setup_SRE_Networking.ps1 b/deployment/secure_research_environment/setup/Setup_SRE_Networking.ps1 deleted file mode 100644 index d45c7f1a2f..0000000000 --- a/deployment/secure_research_environment/setup/Setup_SRE_Networking.ps1 +++ /dev/null @@ -1,97 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $true, HelpMessage = "Enter SRE ID (e.g. 'sandbox')")] - [string]$sreId -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureNetwork -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureResources -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Templates -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-SreConfig -shmId $shmId -sreId $sreId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - - -# Create VNet resource group if it does not exist -# ----------------------------------------------- -$null = Deploy-ResourceGroup -Name $config.sre.network.vnet.rg -Location $config.sre.location - - -# Create VNet and subnets -# ----------------------- -$vnet = Deploy-VirtualNetwork -Name $config.sre.network.vnet.name -ResourceGroupName $config.sre.network.vnet.rg -AddressPrefix $config.sre.network.vnet.cidr -Location $config.sre.location -DnsServer $config.shm.dc.ip, $config.shm.dcb.ip -$computeSubnet = Deploy-Subnet -Name $config.sre.network.vnet.subnets.compute.name -VirtualNetwork $vnet -AddressPrefix $config.sre.network.vnet.subnets.compute.cidr -$null = Deploy-Subnet -Name $config.sre.network.vnet.subnets.data.name -VirtualNetwork $vnet -AddressPrefix $config.sre.network.vnet.subnets.data.cidr -$databasesSubnet = Deploy-Subnet -Name $config.sre.network.vnet.subnets.databases.name -VirtualNetwork $vnet -AddressPrefix $config.sre.network.vnet.subnets.databases.cidr -$deploymentSubnet = Deploy-Subnet -Name $config.sre.network.vnet.subnets.deployment.name -VirtualNetwork $vnet -AddressPrefix $config.sre.network.vnet.subnets.deployment.cidr -$remoteDesktopSubnet = Deploy-Subnet -Name $config.sre.network.vnet.subnets.remoteDesktop.name -VirtualNetwork $vnet -AddressPrefix $config.sre.network.vnet.subnets.remoteDesktop.cidr -$webappsSubnet = Deploy-Subnet -Name $config.sre.network.vnet.subnets.webapps.name -VirtualNetwork $vnet -AddressPrefix 
$config.sre.network.vnet.subnets.webapps.cidr - - -# Peer repository vnet to SHM vnet -# -------------------------------- -Set-VnetPeering -Vnet1Name $config.sre.network.vnet.name ` - -Vnet1ResourceGroupName $config.sre.network.vnet.rg ` - -Vnet1SubscriptionName $config.sre.subscriptionName ` - -Vnet2Name $config.shm.network.vnet.name ` - -Vnet2ResourceGroupName $config.shm.network.vnet.rg ` - -Vnet2SubscriptionName $config.shm.subscriptionName ` - -VNet2AllowRemoteGateway - - -# Ensure that compute NSG exists with correct rules and attach it to the compute subnet -# ------------------------------------------------------------------------------------- -$computeNsg = Deploy-NetworkSecurityGroup -Name $config.sre.network.vnet.subnets.compute.nsg.name -ResourceGroupName $config.sre.network.vnet.rg -Location $config.sre.location -$rules = Get-JsonFromMustacheTemplate -TemplatePath (Join-Path $PSScriptRoot ".." "network_rules" $config.sre.network.vnet.subnets.compute.nsg.rules) -Parameters $config -AsHashtable -$null = Set-NetworkSecurityGroupRules -NetworkSecurityGroup $computeNsg -Rules $rules -$computeSubnet = Set-SubnetNetworkSecurityGroup -Subnet $computeSubnet -NetworkSecurityGroup $computeNsg - - -# Ensure that database NSG exists with correct rules and attach it to the database subnet -# --------------------------------------------------------------------------------------- -$databasesNsg = Deploy-NetworkSecurityGroup -Name $config.sre.network.vnet.subnets.databases.nsg.name -ResourceGroupName $config.sre.network.vnet.rg -Location $config.sre.location -$rules = Get-JsonFromMustacheTemplate -TemplatePath (Join-Path $PSScriptRoot ".." "network_rules" $config.sre.network.vnet.subnets.databases.nsg.rules) -Parameters $config -AsHashtable -$null = Set-NetworkSecurityGroupRules -NetworkSecurityGroup $databasesNsg -Rules $rules -$databasesSubnet = Set-SubnetNetworkSecurityGroup -Subnet $databasesSubnet -NetworkSecurityGroup $databasesNsg - - -# Ensure that deployment NSG exists with correct rules and attach it to the deployment subnet -# ------------------------------------------------------------------------------------------- -$deploymentNsg = Deploy-NetworkSecurityGroup -Name $config.sre.network.vnet.subnets.deployment.nsg.name -ResourceGroupName $config.sre.network.vnet.rg -Location $config.sre.location -$rules = Get-JsonFromMustacheTemplate -TemplatePath (Join-Path $PSScriptRoot ".." "network_rules" $config.sre.network.vnet.subnets.deployment.nsg.rules) -Parameters $config -AsHashtable -$null = Set-NetworkSecurityGroupRules -NetworkSecurityGroup $deploymentNsg -Rules $rules -$deploymentSubnet = Set-SubnetNetworkSecurityGroup -Subnet $deploymentSubnet -NetworkSecurityGroup $deploymentNsg - - -# Ensure that webapps NSG exists with correct rules and attach it to the webapps subnet -# ------------------------------------------------------------------------------------- -$webappsNsg = Deploy-NetworkSecurityGroup -Name $config.sre.network.vnet.subnets.webapps.nsg.name -ResourceGroupName $config.sre.network.vnet.rg -Location $config.sre.location -$rules = Get-JsonFromMustacheTemplate -TemplatePath (Join-Path $PSScriptRoot ".." 
"network_rules" $config.sre.network.vnet.subnets.webapps.nsg.rules) -Parameters $config -AsHashtable -$null = Set-NetworkSecurityGroupRules -NetworkSecurityGroup $webappsNsg -Rules $rules -$webappsSubnet = Set-SubnetNetworkSecurityGroup -Subnet $webappsSubnet -NetworkSecurityGroup $webappsNsg - - -# Set up the correct NSGs and rules for the remote desktop that is being used -# --------------------------------------------------------------------------- -if ($config.sre.remoteDesktop.provider -eq "ApacheGuacamole") { - # Ensure that Guacamole NSG exists with correct rules and attach it to the Guacamole subnet - $guacamoleNsg = Deploy-NetworkSecurityGroup -Name $config.sre.network.vnet.subnets.remoteDesktop.nsg.name -ResourceGroupName $config.sre.network.vnet.rg -Location $config.sre.location - $rules = Get-JsonFromMustacheTemplate -TemplatePath (Join-Path $PSScriptRoot ".." "network_rules" $config.sre.network.vnet.subnets.remoteDesktop.nsg.rules) -Parameters $config -AsHashtable - $null = Set-NetworkSecurityGroupRules -NetworkSecurityGroup $guacamoleNsg -Rules $rules - $remoteDesktopSubnet = Set-SubnetNetworkSecurityGroup -Subnet $remoteDesktopSubnet -NetworkSecurityGroup $guacamoleNsg -} else { - Add-LogMessage -Level Fatal "Remote desktop type '$($config.sre.remoteDesktop.type)' was not recognised!" -} - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/secure_research_environment/setup/Setup_SRE_Storage_Accounts.ps1 b/deployment/secure_research_environment/setup/Setup_SRE_Storage_Accounts.ps1 deleted file mode 100644 index cd6a87946e..0000000000 --- a/deployment/secure_research_environment/setup/Setup_SRE_Storage_Accounts.ps1 +++ /dev/null @@ -1,204 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $true, HelpMessage = "Enter SRE ID (e.g. 
'sandbox')")] - [string]$sreId -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.Resources -ErrorAction Stop -Import-Module Az.Storage -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureCompute -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureKeyVault -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureNetwork -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureResources -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureStorage -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Cryptography -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/DataStructures -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-SreConfig -shmId $shmId -sreId $sreId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - - -# Ensure that a storage account exists in the SHM for this SRE -# ------------------------------------------------------------ -$null = Set-AzContext -SubscriptionId $config.shm.subscriptionName -ErrorAction Stop -$null = Deploy-ResourceGroup -Name $config.shm.storage.persistentdata.rg -Location $config.shm.location -$persistentStorageAccount = Deploy-StorageAccount -Name $config.sre.storage.persistentdata.account.name ` - -AccessTier $config.sre.storage.persistentdata.account.accessTier ` - -Kind $config.sre.storage.persistentdata.account.storageKind ` - -Location $config.shm.location ` - -ResourceGroupName $config.shm.storage.persistentdata.rg ` - -SkuName $config.sre.storage.persistentdata.account.performance -# Add a temporary override during deployment -$null = Update-AzStorageAccountNetworkRuleSet -Name $config.sre.storage.persistentdata.account.name -ResourceGroupName $config.shm.storage.persistentdata.rg -DefaultAction Allow -Start-Sleep 30 -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - - -# Set up containers for persistent data in the SHM -# These are Blob storage mounted over SMB -# ------------------------------------------------ -if (-not $persistentStorageAccount.PrimaryEndpoints.Blob) { - Add-LogMessage -Level Fatal "Storage account '$($config.sre.storage.userdata.account.name)' does not support blob storage! If you attempted to override this setting in your config file, please remove this change." -} -foreach ($receptacleName in $config.sre.storage.persistentdata.containers.Keys) { - if ($config.sre.storage.persistentdata.containers[$receptacleName].mountType -notlike "*SMB*") { - Add-LogMessage -Level Fatal "Currently only file storage mounted over SMB is supported for the '$receptacleName' container! If you attempted to override this setting in your config file, please remove this change." 
- } - - # When using blob storage we need to mount using a SAS token - if ($config.sre.storage.persistentdata.containers[$receptacleName].mountType -eq "BlobSMB") { - # Deploy the container - $null = Deploy-StorageReceptacle -Name $receptacleName -StorageAccount $persistentStorageAccount -StorageType "Container" - - # As this is a blob container, we need to access it using a SAS token and a private endpoint - # Ensure that the appropriate SAS policy exists - $accessPolicyName = $config.sre.storage.persistentdata.containers[$receptacleName].accessPolicyName - $sasPolicy = Deploy-SasAccessPolicy -Name $accessPolicyName ` - -Permission $config.sre.storage.accessPolicies[$accessPolicyName].permissions ` - -StorageAccount $persistentStorageAccount ` - -ContainerName $receptacleName ` - -ValidityYears 1 - - # As we want to ensure that the SAS token is valid for 1 year from *now* we do not want to re-use old tokens - # We therefore always generate a new token and store it in the Key Vault (note that old tokens will still be valid and will still be stored as old versions of the secret) - # Note that this also protects us against the case when a SAS token corresponding to an old storage receptacle has been stored in the Key Vault - $sasToken = New-StorageReceptacleSasToken -ContainerName $receptacleName -PolicyName $sasPolicy.Policy -StorageAccount $persistentStorageAccount - $null = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $config.sre.storage.persistentdata.containers[$receptacleName].connectionSecretName -DefaultValue $sasToken -AsPlaintext -ForceOverwrite - - # When using a file share we need to mount using the storage key - } elseif ($config.sre.storage.persistentdata.containers[$receptacleName].mountType -eq "ShareSMB") { - # Deploy the share - $null = Deploy-StorageReceptacle -Name $receptacleName -StorageAccount $persistentStorageAccount -StorageType "Share" - - # Ensure that the appropriate storage key is stored in the SRE Key Vault - $null = Set-AzContext -SubscriptionId $config.shm.subscriptionName -ErrorAction Stop - $storageKey = (Get-AzStorageAccountKey -ResourceGroupName $config.shm.storage.persistentdata.rg -Name $config.sre.storage.persistentdata.account.name | Where-Object { $_.KeyName -eq "key1" }).Value - $null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - $null = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $config.sre.storage.persistentdata.containers[$receptacleName].connectionSecretName -DefaultValue $storageKey -AsPlaintext -ForceOverwrite - } -} - - -# Set up containers for user data in the SRE -# These are Files storage mounted over NFS -# Note that we *must* register the NFS provider before creating the storage account: -# https://docs.microsoft.com/en-us/azure/storage/files/storage-troubleshooting-files-nfs#cause-3-the-storage-account-was-created-prior-to-registration-completing -# ----------------------------------------------------------------------------------------------------------------------------------------------------------------- -# Register NFS provider -if ((Get-AzProviderFeature -FeatureName AllowNfsFileShares -ProviderNamespace Microsoft.Storage).RegistrationState -eq "NotRegistered") { - $null = Register-AzProviderFeature -FeatureName AllowNfsFileShares -ProviderNamespace Microsoft.Storage - $null = Register-AzResourceProvider -ProviderNamespace Microsoft.Storage -} -# Wait until registration is complete -$progress = 0 -$registrationState = (Get-AzProviderFeature 
-FeatureName AllowNfsFileShares -ProviderNamespace Microsoft.Storage).RegistrationState -while ($registrationState -ne "Registered") { - $registrationState = (Get-AzProviderFeature -FeatureName AllowNfsFileShares -ProviderNamespace Microsoft.Storage).RegistrationState - $progress = [math]::min(100, $progress + 1) - Write-Progress -Activity "Registering NFS feature in '$((Get-AzContext).Subscription.Name)' subscription" -Status $registrationState -PercentComplete $progress - Start-Sleep 30 -} -$null = Deploy-ResourceGroup -Name $config.sre.storage.userdata.account.rg -Location $config.sre.location -# Note that we disable the https requirement as per the Azure documentation: -# "Double encryption is not supported for NFS shares yet. Azure provides a -# layer of encryption for all data in transit between Azure datacenters -# using MACSec. NFS shares can only be accessed from trusted virtual -# networks and over VPN tunnels. No additional transport layer encryption -# is available on NFS shares." -$userdataStorageAccount = Deploy-StorageAccount -Name $config.sre.storage.userdata.account.name ` - -AccessTier $config.sre.storage.userdata.account.accessTier ` - -Kind $config.sre.storage.userdata.account.storageKind ` - -Location $config.sre.location ` - -ResourceGroupName $config.sre.storage.userdata.account.rg ` - -SkuName $config.sre.storage.userdata.account.performance ` - -AllowHttpTraffic -# Add a temporary override during deployment -$null = Update-AzStorageAccountNetworkRuleSet -Name $config.sre.storage.userdata.account.name -ResourceGroupName $config.sre.storage.userdata.account.rg -DefaultAction Allow -Start-Sleep 30 - - -# Ensure that all required userdata containers exist -# -------------------------------------------------- -if (-not $userdataStorageAccount.PrimaryEndpoints.File) { - Add-LogMessage -Level Fatal "Storage account '$($config.sre.storage.userdata.account.name)' does not support file storage! If you attempted to override this setting in your config file, please remove this change." -} -foreach ($receptacleName in $config.sre.storage.userdata.containers.Keys) { - # Ensure that we are using NFS - if ($config.sre.storage.userdata.containers[$receptacleName].mountType -ne "NFS") { - Add-LogMessage -Level Fatal "Currently only file-storage mounted over NFS is supported for the '$receptacleName' container! If you attempted to override this setting in your config file, please remove this change." 
- } - # Deploy the share and set its quota - $null = Deploy-StorageReceptacle -Name $receptacleName -StorageAccount $userdataStorageAccount -StorageType "NfsShare" - $null = Set-StorageNfsShareQuota -Name $receptacleName -Quota $config.sre.storage.userdata.containers[$receptacleName].sizeGb -StorageAccount $userdataStorageAccount -} - - -# Ensure that SRE artifacts storage account exists -# ------------------------------------------------ -$null = Deploy-ResourceGroup -Name $config.sre.storage.artifacts.rg -Location $config.sre.location -$artifactsStorageAccount = Deploy-StorageAccount -Name $config.sre.storage.artifacts.account.name ` - -AccessTier $config.sre.storage.artifacts.account.accessTier ` - -Kind $config.sre.storage.artifacts.account.storageKind ` - -Location $config.sre.location ` - -ResourceGroupName $config.sre.storage.artifacts.rg ` - -SkuName $config.sre.storage.artifacts.account.performance -# Add a temporary override during deployment -$null = Update-AzStorageAccountNetworkRuleSet -Name $config.sre.storage.artifacts.account.name -ResourceGroupName $config.sre.storage.artifacts.rg -DefaultAction Allow -Start-Sleep 30 - - -# Ensure that the storage accounts can be accessed from the SRE VNet through private endpoints -# -------------------------------------------------------------------------------------------- -$dataSubnet = Get-Subnet -Name $config.sre.network.vnet.subnets.data.name -VirtualNetworkName $config.sre.network.vnet.name -ResourceGroupName $config.sre.network.vnet.rg -foreach ($storageAccount in @($persistentStorageAccount, $userdataStorageAccount, $artifactsStorageAccount)) { - # Set up a private endpoint - Add-LogMessage -Level Info "Setting up private endpoint for '$($storageAccount.StorageAccountName)'" - $privateEndpoint = Deploy-StorageAccountEndpoint -StorageAccount $storageAccount -StorageType "Default" -Subnet $dataSubnet -ResourceGroupName $config.sre.network.vnet.rg -Location $config.sre.location - $privateEndpointIp = (Get-AzNetworkInterface -ResourceId $privateEndpoint.NetworkInterfaces.Id).IpConfigurations[0].PrivateIpAddress - $privateEndpointFqdns = Get-StorageAccountEndpoints -StorageAccount $storageAccount | ForEach-Object { $_.Split("/")[2] } # we want only the FQDN without protocol or trailing slash - # Set up a DNS zone on the SHM DC - $null = Set-AzContext -SubscriptionId $config.shm.subscriptionName -ErrorAction Stop - Add-LogMessage -Level Info "Setting up DNS zones for: $privateEndpointFqdns" - $params = @{ - privateEndpointFqdnsB64 = $privateEndpointFqdns | ConvertTo-Json -Depth 99 | ConvertTo-Base64 - IpAddress = $privateEndpointIp - } - $scriptPath = Join-Path $PSScriptRoot ".." 
"remote" "create_storage" "Set_DNS_Zone.ps1" - $null = Invoke-RemoteScript -Shell "PowerShell" -ScriptPath $scriptPath -vmName $config.shm.dc.vmName -ResourceGroupName $config.shm.dc.rg -Parameter $params - $null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop -} - - -# Ensure that public access to the storage accounts is only allowed from approved locations -# ----------------------------------------------------------------------------------------- -# Persistent data - allow access from approved IP addresses -$null = Set-AzContext -SubscriptionId $config.shm.subscriptionName -ErrorAction Stop -$null = Update-AzStorageAccountNetworkRuleSet -Name $config.sre.storage.persistentdata.account.name -ResourceGroupName $config.shm.storage.persistentdata.rg -DefaultAction Deny -foreach ($IpAddress in $config.sre.storage.persistentdata.account.allowedIpAddresses) { - $null = Add-AzStorageAccountNetworkRule -AccountName $config.sre.storage.persistentdata.account.name -ResourceGroupName $config.shm.storage.persistentdata.rg -IPAddressOrRange $IpAddress -} -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop -# Build artifacts - allow access from approved IP addresses -if ($config.sre.storage.artifacts.account.allowedIpAddresses -eq "any") { - $null = Update-AzStorageAccountNetworkRuleSet -Name $config.sre.storage.artifacts.account.name -ResourceGroupName $config.sre.storage.artifacts.rg -DefaultAction Allow -} else { - $null = Update-AzStorageAccountNetworkRuleSet -Name $config.sre.storage.artifacts.account.name -ResourceGroupName $config.sre.storage.artifacts.rg -DefaultAction Deny - foreach ($IpAddress in $config.sre.storage.artifacts.account.allowedIpAddresses) { - $null = Add-AzStorageAccountNetworkRule -AccountName $config.sre.storage.artifacts.account.name -ResourceGroupName $config.sre.storage.artifacts.rg -IPAddressOrRange $IpAddress - } -} -# User data - deny all access -$null = Update-AzStorageAccountNetworkRuleSet -Name $config.sre.storage.userdata.account.name -ResourceGroupName $config.sre.storage.userdata.account.rg -DefaultAction Deny - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/secure_research_environment/setup/Setup_SRE_WebApp_Servers.ps1 b/deployment/secure_research_environment/setup/Setup_SRE_WebApp_Servers.ps1 deleted file mode 100644 index 35ae6f7e4c..0000000000 --- a/deployment/secure_research_environment/setup/Setup_SRE_WebApp_Servers.ps1 +++ /dev/null @@ -1,139 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $true, HelpMessage = "Enter SRE ID (e.g. 
'sandbox')")] - [string]$sreId -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.Network -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureCompute -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureKeyVault -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureNetwork -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureResources -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureStorage -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Cryptography -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/DataStructures -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/RemoteCommands -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Templates -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-SreConfig -shmId $shmId -sreId $sreId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - - -# Retrieve passwords from the Key Vault -# ------------------------------------- -Add-LogMessage -Level Info "Creating/retrieving secrets from Key Vault '$($config.sre.keyVault.name)'..." -$ldapSearchUserPassword = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $config.sre.users.serviceAccounts.ldapSearch.passwordSecretName -DefaultLength 20 -AsPlaintext -$vmAdminUsername = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $config.sre.keyVault.secretNames.adminUsername -DefaultValue "sre$($config.sre.id)admin".ToLower() -AsPlaintext -$config.sre.storage.persistentdata.ingressSasToken = Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $config.sre.storage.persistentdata.containers.ingress.connectionSecretName -AsPlaintext - - -# Retrieve VNET and subnets -# ------------------------- -Add-LogMessage -Level Info "Retrieving virtual network '$($config.sre.network.vnet.name)' and subnets..." -try { - $vnet = Get-AzVirtualNetwork -Name $config.sre.network.vnet.name -ResourceGroupName $config.sre.network.vnet.rg -ErrorAction Stop - $deploymentSubnet = Get-Subnet -Name $config.sre.network.vnet.subnets.deployment.name -VirtualNetworkName $vnet.Name -ResourceGroupName $config.sre.network.vnet.rg -ErrorAction Stop - $webappsSubnet = Get-Subnet -Name $config.sre.network.vnet.subnets.webapps.name -VirtualNetworkName $vnet.Name -ResourceGroupName $config.sre.network.vnet.rg -ErrorAction Stop - Add-LogMessage -Level Success "Successfully retrieved virtual network '$($config.sre.network.vnet.name)' and subnets." -} catch { - Add-LogMessage -Level Fatal "Failed to retrieve virtual network '$($config.sre.network.vnet.name)'!" -} - - -# Common components -# ----------------- -$null = Deploy-ResourceGroup -Name $config.sre.webapps.rg -Location $config.sre.location -$bootDiagnosticsAccount = Deploy-StorageAccount -Name $config.sre.storage.bootdiagnostics.accountName -ResourceGroupName $config.sre.storage.bootdiagnostics.rg -Location $config.sre.location -$cloudInitBasePath = Join-Path $PSScriptRoot ".." 
"cloud_init" -Resolve -$ldapSearchUserDn = "CN=$($config.sre.users.serviceAccounts.ldapSearch.name),$($config.shm.domain.ous.serviceAccounts.path)" - - -# Deploy and configure CodiMD VM -# ------------------------------ -Add-LogMessage -Level Info "Constructing CodiMD cloud-init from template..." -# Load the cloud-init template and expand mustache placeholders -$config["codimd"] = @{ - ldapSearchUserDn = $ldapSearchUserDn - ldapSearchUserPassword = $ldapSearchUserPassword - ldapUserFilter = "(&(objectClass=user)(memberOf=CN=$($config.sre.domain.securityGroups.researchUsers.name),$($config.shm.domain.ous.securityGroups.path))(sAMAccountName={{username}}))" - postgresPassword = $(Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $config.sre.webapps.codimd.postgres.passwordSecretName -DefaultLength 20 -AsPlaintext) -} -$codimdCloudInitTemplate = Join-Path $cloudInitBasePath "cloud-init-codimd.mustache.yaml" | Get-Item | Get-Content -Raw -$codimdCloudInitTemplate = Expand-CloudInitResources -Template $codimdCloudInitTemplate -ResourcePath (Join-Path $cloudInitBasePath "resources") -$codimdCloudInitTemplate = Expand-CloudInitResources -Template $codimdCloudInitTemplate -ResourcePath (Join-Path ".." ".." "common" "resources") -$codimdCloudInitTemplate = Expand-MustacheTemplate -Template $codimdCloudInitTemplate -Parameters $config -# Deploy CodiMD VM -$codimdDataDisk = Deploy-ManagedDisk -Name "$($config.sre.webapps.codimd.vmName)-DATA-DISK" -SizeGB $config.sre.webapps.codimd.disks.data.sizeGb -Type $config.sre.webapps.codimd.disks.data.type -ResourceGroupName $config.sre.webapps.rg -Location $config.sre.location -$params = @{ - AdminPassword = (Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $config.sre.webapps.codimd.adminPasswordSecretName -DefaultLength 20) - AdminUsername = $vmAdminUsername - BootDiagnosticsAccount = $bootDiagnosticsAccount - CloudInitYaml = $codimdCloudInitTemplate - DataDiskIds = @($codimdDataDisk.Id) - ImageSku = $config.sre.webapps.codimd.osVersion - Location = $config.sre.location - Name = $config.sre.webapps.codimd.vmName - OsDiskSizeGb = $config.sre.webapps.codimd.disks.os.sizeGb - OsDiskType = $config.sre.webapps.codimd.disks.os.type - PrivateIpAddress = (Get-NextAvailableIpInRange -IpRangeCidr $config.sre.network.vnet.subnets.deployment.cidr -VirtualNetwork $vnet) - ResourceGroupName = $config.sre.webapps.rg - Size = $config.sre.webapps.codimd.vmSize - Subnet = $deploymentSubnet -} -$codimdVm = Deploy-LinuxVirtualMachine @params -# Change subnets and IP address while CodiMD VM is off then restart -Update-VMIpAddress -Name $codimdVm.Name -ResourceGroupName $codimdVm.ResourceGroupName -Subnet $webappsSubnet -IpAddress $config.sre.webapps.codimd.ip -# Update DNS records for this VM -Update-VMDnsRecords -DcName $config.shm.dc.vmName -DcResourceGroupName $config.shm.dc.rg -BaseFqdn $config.sre.domain.fqdn -ShmSubscriptionName $config.shm.subscriptionName -VmHostname $config.sre.webapps.codimd.hostname -VmIpAddress $config.sre.webapps.codimd.ip - - -# Deploy and configure GitLab VM -# ------------------------------ -Add-LogMessage -Level Info "Constructing GitLab cloud-init from template..." 
-# Load the cloud-init template and expand mustache placeholders -$config["gitlab"] = @{ - ldapSearchUserDn = $ldapSearchUserDn - ldapSearchUserPassword = $ldapSearchUserPassword - ldapUserFilter = "(&(objectClass=user)(memberOf=CN=$($config.sre.domain.securityGroups.researchUsers.name),$($config.shm.domain.ous.securityGroups.path)))" - rootPassword = $(Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $config.sre.webapps.gitlab.rootPasswordSecretName -DefaultLength 20 -AsPlaintext) -} -$gitlabCloudInitTemplate = Join-Path $cloudInitBasePath "cloud-init-gitlab.mustache.yaml" | Get-Item | Get-Content -Raw -$gitlabCloudInitTemplate = Expand-CloudInitResources -Template $gitlabCloudInitTemplate -ResourcePath (Join-Path $cloudInitBasePath "resources") -$gitlabCloudInitTemplate = Expand-CloudInitResources -Template $gitlabCloudInitTemplate -ResourcePath (Join-Path ".." ".." "common" "resources") -$gitlabCloudInitTemplate = Expand-MustacheTemplate -Template $gitlabCloudInitTemplate -Parameters $config -# Deploy GitLab VM -$gitlabDataDisk = Deploy-ManagedDisk -Name "$($config.sre.webapps.gitlab.vmName)-DATA-DISK" -SizeGB $config.sre.webapps.gitlab.disks.data.sizeGb -Type $config.sre.webapps.gitlab.disks.data.type -ResourceGroupName $config.sre.webapps.rg -Location $config.sre.location -$params = @{ - AdminPassword = (Resolve-KeyVaultSecret -VaultName $config.sre.keyVault.name -SecretName $config.sre.webapps.gitlab.adminPasswordSecretName -DefaultLength 20) - AdminUsername = $vmAdminUsername - BootDiagnosticsAccount = $bootDiagnosticsAccount - CloudInitYaml = $gitlabCloudInitTemplate - DataDiskIds = @($gitlabDataDisk.Id) - ImageSku = $config.sre.webapps.gitlab.osVersion - Location = $config.sre.location - Name = $config.sre.webapps.gitlab.vmName - OsDiskSizeGb = $config.sre.webapps.gitlab.disks.os.sizeGb - OsDiskType = $config.sre.webapps.gitlab.disks.os.type - PrivateIpAddress = (Get-NextAvailableIpInRange -IpRangeCidr $config.sre.network.vnet.subnets.deployment.cidr -VirtualNetwork $vnet) - ResourceGroupName = $config.sre.webapps.rg - Size = $config.sre.webapps.gitlab.vmSize - Subnet = $deploymentSubnet -} -$gitlabVm = Deploy-LinuxVirtualMachine @params -# Change subnets and IP address while GitLab VM is off then restart -Update-VMIpAddress -Name $gitlabVm.Name -ResourceGroupName $gitlabVm.ResourceGroupName -Subnet $webappsSubnet -IpAddress $config.sre.webapps.gitlab.ip -# Update DNS records for this VM -Update-VMDnsRecords -DcName $config.shm.dc.vmName -DcResourceGroupName $config.shm.dc.rg -BaseFqdn $config.sre.domain.fqdn -ShmSubscriptionName $config.shm.subscriptionName -VmHostname $config.sre.webapps.gitlab.hostname -VmIpAddress $config.sre.webapps.gitlab.ip - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/secure_research_environment/setup/Unpeer_SRE_Package_Repositories.ps1 b/deployment/secure_research_environment/setup/Unpeer_SRE_Package_Repositories.ps1 deleted file mode 100644 index 02b7a39eb0..0000000000 --- a/deployment/secure_research_environment/setup/Unpeer_SRE_Package_Repositories.ps1 +++ /dev/null @@ -1,68 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $true, HelpMessage = "Enter SRE ID (e.g. 
'sandbox')")] - [string]$sreId -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.Network -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-SreConfig -shmId $shmId -sreId $sreId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - - -# Get SRE virtual network -# ----------------------- -Add-LogMessage -Level Info "Removing all existing mirror peerings..." -$sreVnet = Get-AzVirtualNetwork -Name $config.sre.network.vnet.name -ResourceGroupName $config.sre.network.vnet.rg - - -# Remove SHM side of mirror and repository peerings involving this SRE -# -------------------------------------------------------------------- -$null = Set-AzContext -SubscriptionId $config.shm.subscriptionName -ErrorAction Stop -$mirrorVnets = Get-AzVirtualNetwork | Where-Object { $_.Name -like "*MIRROR*" -or $_.Name -like "*REPOSITORY" } -foreach ($mirrorVnet in $mirrorVnets) { - $mirrorPeerings = Get-AzVirtualNetworkPeering -Name "*" -VirtualNetwork $mirrorVnet.Name -ResourceGroupName $mirrorVnet.ResourceGroupName - foreach ($mirrorPeering in $mirrorPeerings) { - # Remove peerings that involve this SRE - if ($mirrorPeering.RemoteVirtualNetwork.Id -eq $sreVnet.Id) { - Add-LogMessage -Level Info "[ ] Removing peering $($mirrorPeering.Name): $($mirrorPeering.VirtualNetworkName) -> $($sreVnet.Name)" - $null = Remove-AzVirtualNetworkPeering -Name $mirrorPeering.Name -VirtualNetworkName $mirrorVnet.Name -ResourceGroupName $mirrorVnet.ResourceGroupName -Force - if ($?) { - Add-LogMessage -Level Success "Peering removal succeeded" - } else { - Add-LogMessage -Level Fatal "Peering removal failed!" - } - } - } -} - - -# Remove peering to this SRE from each SHM mirror or repository network -# --------------------------------------------------------------------- -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop -$srePeerings = Get-AzVirtualNetworkPeering -Name "*" -VirtualNetwork $sreVnet.Name -ResourceGroupName $sreVnet.ResourceGroupName -foreach ($srePeering in $srePeerings) { - # Remove peerings that involve any of the mirror VNets - $peeredVnets = $mirrorVnets | Where-Object { $_.Id -eq $srePeering.RemoteVirtualNetwork.Id } - foreach ($mirrorVnet in $peeredVnets) { - Add-LogMessage -Level Info "[ ] Removing peering $($srePeering.Name): $($srePeering.VirtualNetworkName) -> $($mirrorVnet.Name)" - $null = Remove-AzVirtualNetworkPeering -Name $srePeering.Name -VirtualNetworkName $sreVnet.Name -ResourceGroupName $sreVnet.ResourceGroupName -Force - if ($?) { - Add-LogMessage -Level Success "Peering removal succeeded" - } else { - Add-LogMessage -Level Fatal "Peering removal failed!" - } - } -} - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/secure_research_environment/setup/Update_SRE_Guacamole_Dashboard.ps1 b/deployment/secure_research_environment/setup/Update_SRE_Guacamole_Dashboard.ps1 deleted file mode 100644 index 4a51d51abd..0000000000 --- a/deployment/secure_research_environment/setup/Update_SRE_Guacamole_Dashboard.ps1 +++ /dev/null @@ -1,66 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 
'project')")] - [string]$shmId, - [Parameter(Mandatory = $true, HelpMessage = "Enter SRE ID (e.g. 'sandbox')")] - [string]$sreId -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureCompute -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-SreConfig -shmId $shmId -sreId $sreId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - - -# Check that we are using the correct provider -# -------------------------------------------- -if ($config.sre.remoteDesktop.provider -ne "ApacheGuacamole") { - Add-LogMessage -Level Fatal "You should not be running this script when using remote desktop provider '$($config.sre.remoteDesktop.provider)'" -} - - -# Get list of SRDs -# ---------------- -Add-LogMessage -Level Info "Retrieving list of SRD VMs..." -$VMs = Get-AzVM -ResourceGroupName $config.sre.srd.rg | ` - Where-Object { $_.Name -like "*SRD*" } | ` - ForEach-Object { - $VM = $_; - $VMSize = Get-AzVMSize -Location $config.sre.location | Where-Object { $_.Name -eq $VM.HardwareProfile.VmSize }; - @{ - "type" = (($VM.HardwareProfile.VmSize).StartsWith("N") ? "GPU" : "CPU") - "ipAddress" = (Get-AzNetworkInterface | Where-Object { $_.VirtualMachine.Id -eq $VM.Id }).IpConfigurations[0].PrivateIpAddress - "cores" = $VMSize.NumberOfCores - "memory" = $VMSize.MemoryInMB * 1mb / 1gb - "os" = $VM.OSProfile.WindowsConfiguration ? "Windows" : "Ubuntu" - } - } | Sort-Object -Property ipAddress - - -# Add an index to each Ubuntu and Windows VM -# The number increases with IP address -# ------------------------------------------ -$VMs | ForEach-Object { $idxUbuntu = 0; $idxWindows = 0 } { - if ($_.os -eq "Windows") { $_.index = $idxWindows; $idxWindows++ } - elseif ($_.os -eq "Ubuntu") { $_.index = $idxUbuntu; $idxUbuntu++ } -} - - -# Update the remote file list -# --------------------------- -Add-LogMessage -Level Info "Updating Guacamole with $(@($VMs).Count) VMs..." -$lines = @("#! /bin/bash", "truncate -s 0 /opt/postgresql/data/connections.csv") -$lines += $VMs | ForEach-Object { "echo '$($_.os)-$($_.index) [$($_.cores)$($_.type)s $($_.memory)GB] ($($_.ipAddress));$($_.ipAddress)' >> /opt/postgresql/data/connections.csv" } -$lines += @("/opt/pg-ldap-sync/synchronise_database.sh") -$null = Invoke-RemoteScript -VMName $config.sre.remoteDesktop.guacamole.vmName -ResourceGroupName $config.sre.remoteDesktop.rg -Shell "UnixShell" -Script ($lines | Join-String -Separator "`n") - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/secure_research_environment/setup/Update_SRE_SSL_Certificate.ps1 b/deployment/secure_research_environment/setup/Update_SRE_SSL_Certificate.ps1 deleted file mode 100644 index 61c87cec5b..0000000000 --- a/deployment/secure_research_environment/setup/Update_SRE_SSL_Certificate.ps1 +++ /dev/null @@ -1,290 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $true, HelpMessage = "Enter SRE ID (e.g. 
'sandbox')")] - [string]$sreId, - [Parameter(Mandatory = $false, HelpMessage = "Do a 'dry run' against the Let's Encrypt staging server.")] - [switch]$dryRun, - [Parameter(Mandatory = $false, HelpMessage = "Force the installation step even for dry runs.")] - [switch]$forceInstall -) - -# Import modules -# -------------- -Import-Module Az.Accounts -Import-Module Az.Compute -Import-Module Az.KeyVault -Import-Module $PSScriptRoot/../../common/AzureCompute -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureKeyVault -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop - - -# Check that we are authenticated in Azure -# ---------------------------------------- -$azProfile = [Microsoft.Azure.Commands.Common.Authentication.Abstractions.AzureRmProfileProvider]::Instance.Profile -if (-not $azProfile.Accounts.Count) { - Add-LogMessage -Level Fatal "Could not find a valid AzProfile, please run Connect-AzAccount!" -} - -# Get config and original context -# ------------------------------- -$config = Get-SreConfig -shmId $shmId -sreId $sreId -$originalContext = Get-AzContext -$null = Set-AzContext -Subscription $config.sre.subscriptionName -ErrorAction Stop - - -# Set certificate name -# -------------------- -$certificateName = $config.sre.keyVault.secretNames.letsEncryptCertificate -if ($dryRun) { $certificateName += "-dryrun" } - - -# Get any additional FQDNs for this VM -# ------------------------------------ -if ($config.sre.remoteDesktop.provider -eq "ApacheGuacamole") { - $remoteDesktopVmFqdn = $config.sre.remoteDesktop.guacamole.fqdn -} else { - Add-LogMessage -Level Fatal "SSL certificate updating is not configured for remote desktop type '$($config.sre.remoteDesktop.type)'!" -} - - -# Check for existing certificate in Key Vault -# ------------------------------------------- -Add-LogMessage -Level Info "[ ] Checking whether signed certificate '$certificateName' already exists in Key Vault..." -$kvCertificate = Get-AzKeyVaultCertificate -VaultName $config.sre.keyVault.name -Name $certificateName -$requestCertificate = $false - - -# Determine whether a certificate request is needed -# ------------------------------------------------- -if ($null -eq $kvCertificate) { - Add-LogMessage -Level Info "No certificate found in Key Vault '$($config.sre.keyVault.name)'" - $requestCertificate = $true -} else { - try { - $renewalDate = [datetime]::ParseExact($kvCertificate.Certificate.NotAfter, "MM/dd/yyyy HH:mm:ss", $null).AddDays(-30) - Add-LogMessage -Level Success "Loaded certificate from Key Vault '$($config.sre.keyVault.name)' with earliest renewal date $($renewalDate.ToString('dd MMM yyyy'))" - } catch [System.Management.Automation.MethodInvocationException] { - $renewalDate = $null - } - if (($null -eq $renewalDate) -or ($(Get-Date) -ge $renewalDate)) { - Add-LogMessage -Level Warning "Removing outdated certificate from Key Vault '$($config.sre.keyVault.name)'..." 
- $null = Remove-AzKeyVaultCertificate -VaultName $config.sre.keyVault.name -Name $certificateName -Force -ErrorAction SilentlyContinue - Start-Sleep 5 # ensure that the removal command has registered before attempting to purge - $null = Remove-AzKeyVaultCertificate -VaultName $config.sre.keyVault.name -Name $certificateName -InRemovedState -Force -ErrorAction SilentlyContinue - $requestCertificate = $true - } -} - - -# Request a new certificate -# ------------------------- -$userFriendlyFqdn = $config.sre.domain.fqdn -if ($requestCertificate) { - Add-LogMessage -Level Info "Preparing to request a new certificate..." - - # Get token for DNS subscription - # ------------------------------ - $azureContext = Set-AzContext -Subscription $config.shm.dns.subscriptionName -ErrorAction Stop - if ($azureContext.TokenCache) { - # Old method: pre Az.Accounts 2.0 - $token = ($azureContext.TokenCache.ReadItems() | Where-Object { ($_.TenantId -eq $azureContext.Subscription.TenantId) -and ($_.Resource -eq "https://management.core.windows.net/") } | Select-Object -First 1).AccessToken - } else { - # New method: hopefully soon to be superceded by a dedicated Get-AzAccessToken cmdlet (https://github.com/Azure/azure-powershell/issues/13337) - $profileClient = New-Object Microsoft.Azure.Commands.ResourceManager.Common.RMProfileClient($azProfile) - $token = $profileClient.AcquireAccessToken($azureContext.Tenant.TenantId).AccessToken - } - $null = Set-AzContext -Subscription $config.sre.subscriptionName -ErrorAction Stop - - # Generate a certificate signing request in the Key Vault - # ------------------------------------------------------- - Add-LogMessage -Level Info "Generating a certificate signing request for $($userFriendlyFqdn) to be signed by Let's Encrypt..." - $SubjectName = "CN=$($userFriendlyFqdn),OU=$($config.shm.name),O=$($config.shm.organisation.name),L=$($config.shm.organisation.townCity),S=$($config.shm.organisation.stateCountyRegion),C=$($config.shm.organisation.countryCode)" - $manualPolicy = New-AzKeyVaultCertificatePolicy -ValidityInMonths 3 -IssuerName "Unknown" -SubjectName "$SubjectName" -DnsName "$remoteDesktopVmFqdn" -KeySize 4096 - $manualPolicy.Exportable = $true - $certificateOperation = Add-AzKeyVaultCertificate -VaultName $config.sre.keyVault.name -Name $certificateName -CertificatePolicy $manualPolicy - $success = $? - $csrPath = (New-TemporaryFile).FullName + ".csr" - "-----BEGIN CERTIFICATE REQUEST-----`n" + $certificateOperation.CertificateSigningRequest + "`n-----END CERTIFICATE REQUEST-----" | Out-File -FilePath $csrPath - if ($success) { - Add-LogMessage -Level Success "CSR creation succeeded" - } else { - Add-LogMessage -Level Fatal "CSR creation failed!" 
- } - - # Run Posh-ACME commands in a subjob to avoid incompatibility with the Az module - # ------------------------------------------------------------------------------ - $certificateFilePath = Start-Job -ArgumentList @($PSScriptRoot, $token, $azureContext.Subscription.Id, $userFriendlyFqdn, $csrPath, $config.shm.organisation.contactEmail, $dryRun) -ScriptBlock { - param( - [string]$ScriptRoot, - [string]$AZAccessToken, - [string]$AZSubscriptionId, - [string]$userFriendlyFqdn, - [string]$CsrPath, - [string]$EmailAddress, - [bool]$dryRun - ) - - # Ensure that Posh-ACME is installed for current user - # --------------------------------------------------- - if (-not (Get-Module -ListAvailable -Name Posh-ACME)) { - Install-Module -Name Posh-ACME -Scope CurrentUser -Force - } - Import-Module Posh-ACME -Force -ErrorAction Stop - Import-Module $ScriptRoot/../../common/Logging -ErrorAction Stop - - - # Set the Posh-ACME server to the appropriate Let's Encrypt endpoint - # ------------------------------------------------------------------ - if ($dryRun) { - Add-LogMessage -Level Info "Using Let's Encrypt staging server (dry-run)" - $null = Set-PAServer LE_STAGE - } else { - Add-LogMessage -Level Info "Using Let's Encrypt production server!" - $null = Set-PAServer LE_PROD - } - - # Set Posh-ACME account - # --------------------- - Add-LogMessage -Level Info "[ ] Checking for Posh-ACME account" - if (-not (Get-PAAccount -List -Contact $EmailAddress)) { - $null = New-PAAccount -Contact $EmailAddress -AcceptTOS - Add-LogMessage -Level Success "Created new Posh-ACME account for email address '$EmailAddress'" - } - $PoshAcmeAccount = Get-PAAccount -List -Contact $EmailAddress - Add-LogMessage -Level Success "Using Posh-ACME account: $($PoshAcmeAccount.Id)" - - # Set Posh-ACME parameters - # ------------------------ - $PoshAcmeParams = @{ - AZSubscriptionId = $AZSubscriptionId - AZAccessToken = $AZAccessToken - } - - # Get the names for the publish and unpublish commands - # ---------------------------------------------------- - $PublishCommandName = Get-Command -Module Posh-ACME -Name "Publish-*Challenge" | ForEach-Object { $_.Name } - $UnpublishCommandName = Get-Command -Module Posh-ACME -Name "Unpublish-*Challenge" | ForEach-Object { $_.Name } - - # Test DNS record creation - # ------------------------ - Add-LogMessage -Level Info "Test that we can interact with DNS records..." - $testDomain = "dnstest.${userFriendlyFqdn}" - Add-LogMessage -Level Info "[ ] Attempting to create a DNS record for $testDomain..." - if ($PublishCommandName -eq "Publish-DnsChallenge") { - Add-LogMessage -Level Warning "The version of the Posh-ACME module that you are using is <4.0.0. Support for this version will be dropped in future." - $null = Publish-DnsChallenge $testDomain -Account $PoshAcmeAccount -Token faketoken -Plugin Azure -PluginArgs $PoshAcmeParams -Verbose - } else { - $null = Publish-Challenge $testDomain -Account $PoshAcmeAccount -Token faketoken -Plugin Azure -PluginArgs $PoshAcmeParams -Verbose - } - if ($?) { - Add-LogMessage -Level Success "DNS record creation succeeded" - } else { - Add-LogMessage -Level Fatal "DNS record creation failed!" - } - Add-LogMessage -Level Info "[ ] Attempting to delete a DNS record for $testDomain..." - if ($UnpublishCommandName -eq "Unpublish-DnsChallenge") { - Add-LogMessage -Level Warning "The version of the Posh-ACME module that you are using is <4.0.0. Support for this version will be dropped in future." 
- $null = Unpublish-DnsChallenge $testDomain -Account $PoshAcmeAccount -Token faketoken -Plugin Azure -PluginArgs $PoshAcmeParams -Verbose - } else { - $null = Unpublish-Challenge $testDomain -Account $PoshAcmeAccount -Token faketoken -Plugin Azure -PluginArgs $PoshAcmeParams -Verbose - } - if ($?) { - Add-LogMessage -Level Success "DNS record deletion succeeded" - } else { - Add-LogMessage -Level Fatal "DNS record deletion failed!" - } - - # Send a certificate-signing-request to be signed - # ----------------------------------------------- - Add-LogMessage -Level Info "Sending the CSR to be signed by Let's Encrypt..." - if ($PublishCommandName -eq "Publish-DnsChallenge") { - Add-LogMessage -Level Warning "The version of the Posh-ACME module that you are using is <4.0.0. Support for this version will be dropped in future." - $null = Publish-DnsChallenge $userFriendlyFqdn -Account $PoshAcmeAccount -Token faketoken -Plugin Azure -PluginArgs $PoshAcmeParams -Verbose - } else { - $null = Publish-Challenge $userFriendlyFqdn -Account $PoshAcmeAccount -Token faketoken -Plugin Azure -PluginArgs $PoshAcmeParams -Verbose - } - $success = $? - Add-LogMessage -Level Info "[ ] Creating certificate for ${userFriendlyFqdn}..." - $null = New-PACertificate -CSRPath $CsrPath -AcceptTOS -Contact $EmailAddress -DnsPlugin Azure -PluginArgs $PoshAcmeParams -Verbose - $success = $success -and $? - if ($success) { - Add-LogMessage -Level Success "Certificate creation succeeded" - } else { - Add-LogMessage -Level Fatal "Certificate creation failed!" - } - return [string](Get-PACertificate -MainDomain $userFriendlyFqdn).CertFile - } | Receive-Job -Wait -AutoRemoveJob - - - # Import signed certificate - # ------------------------- - Add-LogMessage -Level Info "Importing signed certificate into Key Vault '$($config.sre.keyVault.name)'..." - try { - $kvCertificate = Import-AzKeyVaultCertificate -VaultName $config.sre.keyVault.name -Name $certificateName -FilePath $certificateFilePath -ErrorAction Stop - Add-LogMessage -Level Success "Certificate import succeeded" - } catch { - Add-LogMessage -Level Fatal "Certificate import failed!" -Exception $_.Exception - } -} - - -# Warn if this is a dry run -# ------------------------- -$doInstall = $true -if ($dryRun) { - if ($forceInstall) { - Add-LogMessage -Level Warning "Dry run produces an unsigned certificate! Forcing installation on the gateway anyway!" - } else { - Add-LogMessage -Level Error "Dry run produces an unsigned certificate! Use '-forceInstall' if you want to install this on the gateway anyway" - $doInstall = $false - } -} - - -# Install the certificate on the remote desktop gateway -# ----------------------------------------------------- -if ($doInstall) { - $vaultId = (Get-AzKeyVault -VaultName $config.sre.keyVault.name -ResourceGroupName $config.sre.keyVault.rg).ResourceId - $secretURL = (Get-AzKeyVaultSecret -VaultName $config.sre.keyVault.name -Name $certificateName).Id - - - # Get the appropriate VM, script and parameters for configuring the remote server - # ------------------------------------------------------------------------------- - $addSecretParams = @{} - if ($config.sre.remoteDesktop.provider -eq "ApacheGuacamole") { - $targetVM = Get-AzVM -ResourceGroupName $config.sre.remoteDesktop.rg -Name $config.sre.remoteDesktop.guacamole.vmName | Remove-AzVMSecret - $scriptPath = Join-Path $PSScriptRoot ".." 
"remote" "create_guacamole" "scripts" "install_ssl_certificate.sh" - $scriptParams = @{ - USER_FRIENDLY_FQDN = $userFriendlyFqdn - CERT_THUMBPRINT = $kvCertificate.Thumbprint - } - $scriptType = "UnixShell" - } else { - Add-LogMessage -Level Fatal "SSL certificate updating is not configured for remote desktop type '$($config.sre.remoteDesktop.type)'!" - } - - - # Add signed Key Vault certificate to the target VM - # ------------------------------------------------- - Add-LogMessage -Level Info "Adding SSL certificate to $($targetVM.Name)" - $targetVM = Add-AzVMSecret -VM $targetVM -SourceVaultId $vaultId -CertificateUrl $secretURL @addSecretParams - $null = Update-AzVM -ResourceGroupName $targetVM.ResourceGroupName -VM $targetVM - if ($?) { - Add-LogMessage -Level Success "Adding certificate with thumbprint $($kvCertificate.Thumbprint) succeeded" - } else { - Add-LogMessage -Level Fatal "Adding certificate with thumbprint $($kvCertificate.Thumbprint) failed!" - } - - - # Install the certificate and private key on the remote server - # ------------------------------------------------------------ - Add-LogMessage -Level Info "Configuring '$($targetVM.Name)' to use signed SSL certificate" - $null = Invoke-RemoteScript -Shell $scriptType -ScriptPath $scriptPath -VMName $targetVM.Name -ResourceGroupName $targetVM.ResourceGroupName -Parameter $scriptParams -} - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/deployment/secure_research_environment/setup/Update_Stored_Access_Policies.ps1 b/deployment/secure_research_environment/setup/Update_Stored_Access_Policies.ps1 deleted file mode 100644 index 12d2488634..0000000000 --- a/deployment/secure_research_environment/setup/Update_Stored_Access_Policies.ps1 +++ /dev/null @@ -1,38 +0,0 @@ -param( - [Parameter(Mandatory = $true, HelpMessage = "Enter SHM ID (e.g. 'project')")] - [string]$shmId, - [Parameter(Mandatory = $true, HelpMessage = "Enter SRE ID (e.g. 
'sandbox')")] - [string]$sreId -) - -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.Resources -ErrorAction Stop -Import-Module Az.Storage -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/AzureStorage -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Configuration -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/DataStructures -Force -ErrorAction Stop -Import-Module $PSScriptRoot/../../common/Logging -Force -ErrorAction Stop - - -# Get config and original context before changing subscription -# ------------------------------------------------------------ -$config = Get-SreConfig -shmId $shmId -sreId $sreId -$originalContext = Get-AzContext -$null = Set-AzContext -SubscriptionId $config.sre.subscriptionName -ErrorAction Stop - -# Update each SAS Access Policy to be valid for one year from now -# --------------------------------------------------------------- -$persistentStorageAccount = Get-StorageAccount -ResourceGroupName $config.shm.storage.persistentdata.rg -Name $config.sre.storage.persistentdata.account.name -foreach ($receptacleName in $config.sre.storage.persistentdata.containers.Keys) { - $accessPolicyName = $config.sre.storage.persistentdata.containers[$receptacleName].accessPolicyName - $null = Deploy-SasAccessPolicy -Name $accessPolicyName ` - -Permission $config.sre.storage.accessPolicies[$accessPolicyName].permissions ` - -StorageAccount $persistentStorageAccount ` - -ContainerName $receptacleName ` - -ValidityYears 1 ` - -Force -} - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/docs/requirements.in b/docs/requirements.in deleted file mode 100644 index a6230b9742..0000000000 --- a/docs/requirements.in +++ /dev/null @@ -1,4 +0,0 @@ -emoji -myst-parser -pydata-sphinx-theme -Sphinx diff --git a/docs/requirements.txt b/docs/requirements.txt index f0dce77814..8cd6899341 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,87 +1,104 @@ # -# This file is autogenerated by pip-compile with Python 3.11 +# This file is autogenerated by pip-compile with Python 3.12 # by the following command: # -# pip-compile requirements.in +# pip-compile --constraint=requirements-constraints.txt --output-file=docs/requirements.txt - # -accessible-pygments==0.0.4 +accessible-pygments==0.0.5 # via pydata-sphinx-theme -alabaster==0.7.13 +alabaster==0.7.16 # via sphinx -babel==2.12.1 +babel==2.15.0 # via # pydata-sphinx-theme # sphinx -beautifulsoup4==4.12.2 +beautifulsoup4==4.12.3 # via pydata-sphinx-theme -certifi==2023.7.22 +certifi==2024.7.4 # via requests -charset-normalizer==3.1.0 +charset-normalizer==3.3.2 # via requests -docutils==0.19 +docutils==0.21.2 # via # myst-parser # pydata-sphinx-theme # sphinx -emoji==2.2.0 - # via -r requirements.in -idna==3.4 - # via requests + # sphinx-togglebutton +emoji==2.12.1 + # via -r - +idna==3.7 + # via + # -c requirements-constraints.txt + # requests imagesize==1.4.1 # via sphinx -jinja2==3.1.3 +jinja2==3.1.4 # via # myst-parser # sphinx -markdown-it-py==2.2.0 +markdown-it-py==3.0.0 # via # mdit-py-plugins # myst-parser -markupsafe==2.1.2 +markupsafe==2.1.5 # via jinja2 -mdit-py-plugins==0.3.5 +mdit-py-plugins==0.4.1 # via myst-parser mdurl==0.1.2 # via markdown-it-py -myst-parser==1.0.0 - # via -r requirements.in -packaging==23.1 +myst-parser==3.0.1 + # via -r - +packaging==24.1 # via # pydata-sphinx-theme # sphinx -pydata-sphinx-theme==0.13.3 - # via -r requirements.in 
-pygments==2.15.1 +pydata-sphinx-theme==0.15.4 + # via -r - +pygments==2.18.0 # via # accessible-pygments # pydata-sphinx-theme # sphinx -pyyaml==6.0 +pyyaml==6.0.1 # via myst-parser -requests==2.31.0 - # via sphinx +requests==2.32.3 + # via + # -c requirements-constraints.txt + # sphinx snowballstemmer==2.2.0 # via sphinx -soupsieve==2.4.1 +soupsieve==2.5 # via beautifulsoup4 -sphinx==6.2.1 +sphinx==7.4.7 # via - # -r requirements.in + # -r - # myst-parser # pydata-sphinx-theme -sphinxcontrib-applehelp==1.0.4 + # sphinx-togglebutton +sphinx-togglebutton==0.3.2 + # via -r - +sphinxcontrib-applehelp==2.0.0 # via sphinx -sphinxcontrib-devhelp==1.0.2 +sphinxcontrib-devhelp==2.0.0 # via sphinx -sphinxcontrib-htmlhelp==2.0.1 +sphinxcontrib-htmlhelp==2.1.0 # via sphinx sphinxcontrib-jsmath==1.0.1 # via sphinx -sphinxcontrib-qthelp==1.0.3 +sphinxcontrib-qthelp==2.0.0 # via sphinx -sphinxcontrib-serializinghtml==1.1.5 +sphinxcontrib-serializinghtml==2.0.0 # via sphinx -typing-extensions==4.6.1 - # via pydata-sphinx-theme -urllib3==2.0.7 - # via requests +typing-extensions==4.12.2 + # via + # emoji + # pydata-sphinx-theme +urllib3==2.2.2 + # via + # -c requirements-constraints.txt + # requests +wheel==0.44.0 + # via sphinx-togglebutton + +# The following packages are considered to be unsafe in a requirements file: +# setuptools diff --git a/docs/source/conf.py b/docs/source/conf.py index c390633259..f262d36dc2 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -18,16 +18,23 @@ # -- Customisation ----------------------------------------------------------- # Construct list of emoji substitutions -emoji_codes = set( - [ - emoji_code.replace(":", "") - for emoji_list in ( - emoji.unicode_codes.get_emoji_unicode_dict("en").keys(), - emoji.unicode_codes.get_aliases_unicode_dict().keys(), - ) - for emoji_code in emoji_list - ] -) +# This code reproduces the library functions +# - get_emoji_unicode_dict() +# - get_aliases_unicode_dict() +emoji_codes = set() +for emj, data in emoji.unicode_codes.EMOJI_DATA.items(): + # Only accept fully qualified or component emoji + # See https://www.unicode.org/reports/tr51/#def_emoji_sequence + if data["status"] <= emoji.unicode_codes.STATUS["fully_qualified"]: + # Add the English language name (if any) + if "en" in data: + emoji_codes.add(data["en"]) + # Add each of the list of aliases (if any) + if "alias" in data: + for alias in data["alias"]: + emoji_codes.add(alias) +# Strip leading and trailing colons and sort +emoji_codes = sorted(map(lambda s: s.strip(":"), emoji_codes)) # Set sidebar variables if "html_context" not in globals(): @@ -47,6 +54,7 @@ # ones. extensions = [ "myst_parser", + "sphinx_togglebutton", ] # Add any paths that contain templates here, relative to this directory. diff --git a/docs/source/contributing/index.md b/docs/source/contributing/index.md index 93ff8112ef..3d51d07e46 100644 --- a/docs/source/contributing/index.md +++ b/docs/source/contributing/index.md @@ -18,50 +18,52 @@ David Beavan
  David Beavan 📖 🖋
+ David Salvador Jasin 🐛 📖
  Diego Arenas 💻 🤔 🖋
  Ed Chalstrey 💻 📖 🐛 🤔 📋 👀 ⚠️
  Evelina Gabasova 🖋
  Federico Nanni 💻 🐛 📖 🤔
  Franz Király 🖋
- Guillaume Noell 📖 🐛 🤔
+ Guillaume Noell 📖 🐛 🤔
  Helen D Little 🐛 👀
  Helen Sherwood-Taylor 🤔 🖋
  Jack Roberts 💻 🐛
  James Cunningham 💻 📖 🐛 🤔 🖋
  James Geddes 🖋
  James Hetherington 📖 🐛 🤔 🔍 📆 📣 📢 🖋
- James Robinson 💻 🖋 📖 🐛 🤔 🔍 📋 🚇 📆 📣 💬 👀 🛡️ ⚠️ 📢
+ James Robinson 💻 🖋 📖 🐛 🤔 🔍 📋 🚇 📆 📣 💬 👀 🛡️ ⚠️ 📢
  Jim Madge 💻 📖 🐛 🤔 🔍 📋 🚇 📆 📣 💬 👀 🛡️ ⚠️
+ Josh Everett 🐛
  Jules M 📖 🤔 🐛 🖋
  Kirstie Whitaker 🖋 📖 🐛 🤔 🔍 📋 📆 📣 📢 📓
  Martin O'Reilly 💻 🖋 📖 🐛 🤔 🔍 📋 🚇 📆 📣 💬 👀 🛡️ ⚠️ 📢
  Matt Craddock 💻 📖 🐛 🤔 🔍 📋 📣 💬 👀 🛡️ ⚠️
- Oscar T Giles 💻 📖 🤔
- Radka Jersakova 🖋
+ Oscar T Giles 💻 📖 🤔
+ Radka Jersakova 🖋
  Rob Clarke 🤔 🐛 💻 📖 🖋
  Steven Carlysle-Davies 💻 🖋 🤔
  Tim Hobson 💻 🐛 📖 🤔
  Tom Doel 💻 📖 🐛 🤔 🖋
  Tomas Lazauskas 💻 📖 🐛 🤔
- arielle-bennett 🔍 🤔 📆
- bw-faststream 📖 🤔 🐛 📆 📓
+ arielle-bennett 🔍 🤔 📆
+ bw-faststream 📖 🤔 🐛 📆 📓
  cathiest 🖋 📖 🐛 🤔
  davsarper 📖 📋 🔍 🤔 📆 📣 📢
  ens-brett-todd 💻 🤔
  ens-george-holmes 💻 🤔
  getcarter21 💻 🖋
- harisood 📖 🐛 🤔 🔍 📋 📆 📣 💬 📢 🛡️ 📓
- kevinxufs 📖 🤔 🛡️
+ harisood 📖 🐛 🤔 🔍 📋 📆 📣 💬 📢 🛡️ 📓
+ kevinxufs 📖 🤔 🛡️
  miguelmorin 💻 📖 🤔 ⚠️
  oforrest 📖 🤔 📆 📣 🖋
  rwinstanley1
📖 🤔 📆 🛡️ diff --git a/docs/source/deployment/build_srd_image.md b/docs/source/deployment/build_srd_image.md deleted file mode 100644 index 9482988160..0000000000 --- a/docs/source/deployment/build_srd_image.md +++ /dev/null @@ -1,155 +0,0 @@ -# Build an SRE compute image - -These instructions will walk you through creating a new VM image for use in the secure research environment. - -```{include} snippets/00_symbols.partial.md -:relative-images: -``` - -## 1. {{seedling}} Prerequisites - -- An `Azure` subscription with sufficient credits to build the environment in -- `Powershell` for `Azure` - - Install [Powershell v6.0 or above](https://docs.microsoft.com/en-us/powershell/azure/install-az-ps?view=azps-2.2.0) - - Install the Azure [Powershell Module](https://docs.microsoft.com/en-us/powershell/azure/install-az-ps?view=azps-2.2.0&viewFallbackFrom=azps-1.3.0) -- `SSH` or `OpenSSH` (not tested on Windows) -- SHM configuration file - - The core properties for the environment must be present in the `environment_configs` folder as described in the {ref}`Safe Haven Management deployment instructions `. - -````{hint} -If you run: - -```powershell -PS> Start-Transcript -Path -``` - -before you start your deployment and - -```powershell -PS> Stop-Transcript -``` - -afterwards, you will automatically get a full log of the Powershell commands you have run. -```` - -### (Optional) Verify code version - -If you have cloned/forked the code from our `GitHub` repository, you can confirm which version of the Data Safe Haven you are currently using by running the following commands: - -![Powershell: a few seconds](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=a%20few%20seconds) - -```powershell -PS> git tag --list | Select-String $(git describe --tags) -``` - -This will check the tag you are using against the list of known tags and print it out. -You can include this confirmation in any record you keep of your deployment. - -## 2. {{gift}} (Optional) Customise the build configuration - -Provisioning a VM with all the Safe Haven software is done using [cloud-init](https://cloudinit.readthedocs.io/en/latest/). -This takes a basic Ubuntu image and installs and configures all the necessary software packages. -In general, this image should cover most use cases, but it's possible that you may want to customise it for your particular circumstances, for example if you want to add a new package or to update the version of an existing package. - -### Adding a new apt package - -- Add the name of the package to `deployment/secure_research_desktop/packages/packages-apt.list` -- If this package adds a new executable that you would like to be available to the end user, you should also add a check for this to the end of `deployment/secure_research_desktop/cloud_init/cloud-init-buildimage-ubuntu-.mustache.yaml` - -````{hint} -For example, to check for `Azure Data Studio`, the following line was added: - -```bash -if [ "$(which azuredatastudio)" ]; then echo "\n\n*azuredatastudio*\n\n$(which azuredatastudio)"; else echo "ERROR azuredatastudio not found!"; exit 1; fi -``` -```` - -### Adding a new Python package - -- Add the name of the package as it appears on `PyPI` to the package list: - - `deployment/secure_research_desktop/packages/packages-python.yaml` - - If there are any restrictions on acceptable versions for this package (e.g. 
a minimum or exact version) then make sure to specify this -- You should also add this package to the **allow list** used by {ref}`policy_tier_3` package mirrors in `environment_configs/package_lists/allowlist-core-python-pypi-tier3.list` - -### Adding a new R package - -- Add the name of the package as it appears on `CRAN` or `Bioconductor` to the appropriate package list: - - `deployment/secure_research_desktop/packages/packages-r-bioconductor.list` - - `deployment/secure_research_desktop/packages/packages-r-cran.list` -- If this `R` package is available as a pre-compiled apt binary (eg. `abind` is available as `r-cran-abind`) then also add it to `deployment/secure_research_desktop/packages/packages-apt.list`. -- You should also add this package to the **allow list** used by {ref}`policy_tier_3` package mirrors in `environment_configs/package_lists/allowlist-core-r-cran-tier3.list` - -#### Adding packages to the package allowlist - -- When you add a new package to either the `PyPI` or `CRAN` allowlist you should also determine all of its dependencies (and their dependencies, recursively) -- Once you have the list of packages you should add them to: - - **PyPI:** `environment_configs/package_lists/allowlist-full-python-pypi-tier3.list` - - **CRAN:** `environment_configs/package_lists/allowlist-full-r-cran-tier3.list` - -### Changing the version of a package - -If you want to update the version of one of the packages we install from a `.deb` file (eg. `RStudio`), you will need to edit `deployment/secure_research_desktop/cloud_init/cloud-init-buildimage-ubuntu-.mustache.yaml` - -- Find the appropriate `/installation/.debinfo` section under the `write_files:` key -- Update the version number and the `sha256` hash for the file -- Check that the file naming structure still matches the format described in this `.debinfo` file - -## 3. {{construction_worker}} Build a release candidate - -In order to provision a candidate VM you will need to do the following: - -![Powershell: two to three hours](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=two%20to%20three%20hours) at {{file_folder}} `./deployment/secure_research_desktop/setup` - -```powershell -PS> ./Provision_Compute_VM.ps1 -shmId -``` - -- where `` is the {ref}`management environment ID ` for this SRE - -```{note} -- Although the `./Provision_Compute_VM.ps1` script will finish running in a few minutes, the build itself will take several hours. -- We recommend **monitoring** the build by accessing the machine using `ssh` (the ssh info should be printed at the end of the Provision_Compute_VM.ps1 script) and either reading through the full build log at `/var/log/cloud-init-output.log` or running the summary script using `/opt/monitoring/analyse_build.py`. -- **NB.** You will need to connect from an approved administrator IP address -- **NB.** the VM will automatically shutdown at the end of the cloud-init process - if you want to analyse the build after this point, you will need to turn it back on in the `Azure` portal. -``` - -```{error} -- If you are unable to access the VM over `ssh` please check whether you are trying to connect from one of the approved IP addresses that you defined under `vmImages > buildIpAddresses` in the SHM config file. 
-- You can check which IP addresses are currently allowed by looking at the `AllowBuildAdminSSH` inbound connection rule in the `RG_VMIMAGES_NETWORKING > NSG_VMIMAGES_BUILD_CANDIDATES` network security group in the subscription where you are building the candidate VM -``` - -## 4. {{camera}} Convert candidate VM to an image - -Once you are happy with a particular candidate, you can convert it into an image as follows: - -![Powershell: ten minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=ten%20minutes) at {{file_folder}} `./deployment/secure_research_desktop/setup` - -```powershell -PS> ./Convert_VM_To_Image.ps1 -shmId -vmName -``` - -- where `` is the {ref}`management environment ID ` for this SRE -- where `` is the name of the virtual machine created during the provisioning step - -This will build a new image in `RG_VMIMAGES_STORAGE` and delete the VM plus associated build artifacts (hard disk, network card and public IP address) - -```{note} -The first step of this script will run the remote build analysis script. -Please **check** that everything has built correctly before proceeding. -``` - -## 5. {{art}} Register image in the gallery - -Once you have created an image, it can be registered in the image gallery for future use using the `Register_Image_In_Gallery.ps1` script. - -![Powershell: one hour](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=one%20hour) at {{file_folder}} `./deployment/secure_research_desktop/setup` - -```powershell -PS> ./Register_Image_In_Gallery.ps1 -shmId -imageName -``` - -- where `` is the {ref}`management environment ID ` for this SRE -- where `` is the name of the VM image created during the conversion step - -This will register the image in the shared gallery as a new version of the relevant SRD image. -This command can take between 30 minutes and 1 hour to complete, as it has to replicate the VM across 3 different regions. diff --git a/docs/source/deployment/configure_entra_id.md b/docs/source/deployment/configure_entra_id.md new file mode 100644 index 0000000000..73da00895a --- /dev/null +++ b/docs/source/deployment/configure_entra_id.md @@ -0,0 +1,232 @@ +(configure_entra_id)= + +# Configure Microsoft Entra ID + +These instructions will configure the [Microsoft Entra ID](https://www.microsoft.com/en-gb/security/business/identity-access/microsoft-entra-id) where you will manage your users. +You only need one Microsoft Entra ID for your deployment of the Data Safe Haven. + +## Setting up your Microsoft Entra tenant + +:::{tip} +We suggest using a dedicated Microsoft Entra tenant for your DSH deployment, but this is not a requirement. + +We also recommend using a separate tenant for managing your users from the one where your infrastructure subscriptions live, but this is not a requirement. +::: + +If you decide to deploy a new tenant for user management, follow the instructions here: + +:::{admonition} How to deploy a new tenant +:class: dropdown note +Follow the instructions [here](https://learn.microsoft.com/en-us/entra/fundamentals/create-new-tenant). 
+ +- set the **Organisation Name** to something appropriate for your deployment (_e.g._ _Contoso Production Safe Haven_) +- set the **Initial Domain Name** to the lower-case version of the organisation name with spaces and special characters removed (_e.g._ _contosoproductionsafehaven_) +- set the **Country or Region** to whichever country is appropriate for your deployment (_e.g._ _United Kingdom_) + +::: + +## Create a native Microsoft Entra administrator account + +If you created a new Microsoft Entra tenant, an external administrator account will have been automatically created for you. +If you do not already have access to a **native** administrator account, create one using the steps below. + +:::{admonition} How to create a native Entra administrator +:class: dropdown hint +Follow the instructions [here](https://learn.microsoft.com/en-us/entra/fundamentals/how-to-create-delete-users#create-a-new-user). +Use the following settings: + +- **Basics** tab: + - **User principal name:** entra.admin._FIRST_NAME_._LAST_NAME_ + - If you have a choice of domains use _YOUR_ORGANISATION_.onmicrosoft.com, which will create a clearer separation between administrators and users + - **Display name:** Entra Admin - _FIRST_NAME_ _LAST_NAME_ + - **Other fields:** leave them with their default values +- **Properties** tab: + - **Usage location:** set to the country being used for this deployment +- **Assigments** tab: + - Click the **{guilabel}`+ Add role`** button + - Search for **Global Administrator**, and check the box + - Click the **{guilabel}`Select`** button + +::: + +## Register allowed authentication methods + +In this section, you will determine which methods are permitted for multi-factor authentication (MFA). +This is necessary both to secure logins and to allow users to set their own passwords. + +- Sign in to the [Microsoft Entra admin centre](https://entra.microsoft.com/) +- Browse to **{menuselection}`Protection --> Authentication methods`** from the menu on the left side +- Browse to **{menuselection}`Manage --> Policies`** from the secondary menu on the left side +- For each of **Microsoft Authenticator**, **SMS**, **Voice call** and **Email OTP** click on the method name + - Ensure the slider is set to **Enable** and the target to **All users** + - Click the **{guilabel}`Save`** button + +## Activate your native Microsoft Entra account + +In order to use this account you will need to activate it. +Start by setting up authentication methods for this user, following the steps below. + +:::{admonition} How to set up authentication for an Entra user +:class: dropdown hint + +- Follow the instructions [here](https://learn.microsoft.com/en-us/entra/identity/authentication/howto-mfa-userdevicesettings#add-authentication-methods-for-a-user). +- Ensure that you provide **both** a phone number **and** an email address. + +::: + +Now you can reset the password for this user, following the steps below. + +:::{admonition} How to reset your Entra user password +:class: dropdown hint + +- Follow the instructions [here](https://passwordreset.microsoftonline.com/) to set your password +- You will need access to the phone number and/or email address from the previous step + +::: + +## Delete any external administrators + +:::{warning} +In this step we will delete any external account with administrator privileges which might belong to Microsoft Entra ID. +Before you do this, you **must** ensure that you can log into Entra using your **native** administrator account. 
+::: + +Start by identifying whether you have any external users. + +:::{admonition} How to identify external users +:class: dropdown hint + +The **User principal name** field for external users will contain the external domain and will have `#EXT#` before the `@` sign. +::: + +- Sign in to the [Microsoft Entra admin centre](https://entra.microsoft.com/) +- Click on your profile picture at the top right of the page +- Click the **{guilabel}`Sign out`** button to log out of any accounts +- Log in with your native administrator credentials +- Follow the instructions [here](https://learn.microsoft.com/en-us/entra/fundamentals/how-to-create-delete-users#delete-a-user) to delete each external user + +:::{note} +We recommend deleting **all** external users, but if these users are necessary, you can instead remove administrator privileges from them. +::: + +## Create additional administrators + +:::{important} +In order to avoid being a single point of failure, we strongly recommend that you add other administrators in addition to yourself. +::: + +For each other person who will act as an administrator, create an account for them following the steps above and then allow them to reset their own password. + +:::{caution} +You may want to set up an emergency administrator to ensure access to this tenant is not lost if you misconfigure MFA. +To do so, follow the instructions [here](https://learn.microsoft.com/en-us/entra/identity/role-based-access-control/security-emergency-access). +Since this account will be exempt from normal login policies, it should not be used except when **absolutely necessary**. +::: + +## Purchase Microsoft Entra licences + +At least one user needs to have a [Microsoft Entra Licence](https://www.microsoft.com/en-gb/security/business/microsoft-entra-pricing) assigned in order to enable [self-service password reset](https://learn.microsoft.com/en-us/entra/identity/authentication/concept-sspr-licensing) and conditional access policies. + +:::{tip} +**P1 Licences** are sufficient but you may use another licence if you prefer. 
+:::
+
+- Sign in to the [Microsoft Entra admin centre](https://entra.microsoft.com/)
+- Browse to **{menuselection}`Identity --> Billing --> Licenses`** from the menu on the left side
+- Browse to **{menuselection}`All products`** from the secondary menu on the left side
+- If you have not yet licensed a product:
+    - Click on **{guilabel}`+Try/Buy`** and choose a suitable product
+    - Click the **{guilabel}`Activate`** button
+- Wait a few minutes until the selected licence appears on the **All products** view
+
+## Enable self-service password reset
+
+In order to enable self-service password reset (SSPR) you will need to do the following:
+
+- Sign in to the [Microsoft Entra admin centre](https://entra.microsoft.com/)
+- Browse to **{menuselection}`Protection --> Password reset`** from the menu on the left side
+- Browse to **{menuselection}`Manage --> Properties`** from the secondary menu on the left side
+- Under the option **Self service password reset enabled**, choose **All**
+
+## Disable security defaults
+
+- Sign in to the [Microsoft Entra admin centre](https://entra.microsoft.com/)
+- Browse to **{menuselection}`Identity --> Overview --> Properties`** from the menu on the left side
+- Click **{menuselection}`Manage security defaults`** at the bottom of the page
+- In the pop-up menu on the right, set
+    - **Security defaults** to **Disabled (not recommended)**
+    - Select **My organization is planning to use Conditional Access**
+    - Click the **{guilabel}`Save`** button
+- At the prompt click the **{guilabel}`Disable`** button
+
+## Apply conditional access policies
+
+- Sign in to the [Microsoft Entra admin centre](https://entra.microsoft.com/)
+- Browse to **{menuselection}`Protection --> Conditional Access`** from the menu on the left side
+- Browse to **{menuselection}`Policies`** from the secondary menu on the left side
+
+### Require MFA
+
+These instructions will create a policy which requires all users (except the emergency administrator if you have created one) to use multi-factor authentication (MFA) to log in.
+
+:::{admonition} Require MFA policy details
+:class: dropdown hint
+
+- Create a new policy named **Require MFA**
+- Under **{menuselection}`Users`**:
+    - **Include**: Select **All users**
+    - **Exclude**:
+        - Check **Users and groups**
+        - If you created an emergency access admin account, select it here
+- Under **{menuselection}`Target resources`**:
+    - **Include**: Select **All cloud apps**
+- Under **{menuselection}`Conditions`**:
+    - Select **Device platforms** and set:
+        - **Configure:** Select **Yes**
+        - **Select device platforms:** Check all the boxes
+        - Click the **{guilabel}`Done`** button
+- Under **{menuselection}`Grant`**:
+    - Check **Grant access**
+    - Check **Require multi-factor authentication**
+    - Click the **{guilabel}`Select`** button
+- Under **{menuselection}`Session`**:
+    - Check **Sign-in frequency**
+    - Check **Periodic reauthentication**
+    - Set the value to **1 day(s)**
+- Under **{menuselection}`Enable policy`**:
+    - Select **On**
+    - Check **I understand that my account will be impacted by this policy. Proceed anyway.**
+- Click the **{guilabel}`Create`** button
+
+:::
+
+### Restrict Microsoft Entra ID access
+
+These instructions will prevent non-administrators from being able to view the Entra ID configuration.
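+Before you add this second policy, you may wish to confirm that the **Require MFA** policy from the previous step was created and is enabled.
+The snippet below is a minimal sketch (not part of the official deployment steps) that lists conditional access policies via Microsoft Graph using the Azure CLI's `az rest` helper.
+It assumes the Azure CLI is installed, that `YOUR_ENTRA_TENANT_ID` is the tenant ID of this Microsoft Entra ID, and that your signed-in administrator account can obtain a Graph token with permission to read conditional access policies; if the call fails with a permissions error, simply check the policies in the Microsoft Entra admin centre instead.
+
+:::{code} shell
+# Sign in to the user-management tenant; no Azure subscription is needed for this check
+$ az login --tenant YOUR_ENTRA_TENANT_ID --allow-no-subscriptions
+# List each conditional access policy with its display name and current state
+$ az rest --method get \
+    --url "https://graph.microsoft.com/v1.0/identity/conditionalAccess/policies" \
+    --query "value[].{name:displayName, state:state}" --output table
+:::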
+ +:::{admonition} Restrict Microsoft Entra ID access policy details +:class: dropdown hint + +- Create a new policy named **Restrict Microsoft Entra ID access** +- Under **{menuselection}`Users`**: + - **Include**: Select **All users** + - **Exclude**: + - Check **Directory roles** + - In the drop-down menu select **Global administrator** +- Under **{menuselection}`Target resources`**: + - **Include**: + - Select **Select apps** + - Click the **{guilabel}`Select`** button + - In the pop-up menu on the right, select + - **Windows Azure Service Management API** and + - **Microsoft Graph Command Line Tools** then + - Click the **{guilabel}`Select`** button + - **Exclude**: Leave unchanged as **None** +- Under **{menuselection}`Grant`**: + - Check **Block access** + - Click the **{guilabel}`Select`** button +- Under **{menuselection}`Enable policy`** + - Select **On** +- Click the **{guilabel}`Create`** button + +::: diff --git a/docs/source/deployment/deploy_shm.md b/docs/source/deployment/deploy_shm.md index d74ddb678f..8a3425b117 100644 --- a/docs/source/deployment/deploy_shm.md +++ b/docs/source/deployment/deploy_shm.md @@ -1,1092 +1,92 @@ (deploy_shm)= -# Deploy a Safe Haven Management Environment (SHM) +# Deploy the management environment These instructions will deploy a new Safe Haven Management Environment (SHM). -This is required to manage your Secure Research Environments (SREs) and **must be** deployed before you create any SREs. -A single SHM can manage all your SREs. -Alternatively, you may run multiple SHMs concurrently, for example you may have a group of projects with the same lifecycle which share a different SHM to your other projects. - -```{include} snippets/00_symbols.partial.md -:relative-images: -``` - -(deploy_shm_prerequisites)= - -## 1. {{seedling}} Prerequisites - -- An [Azure subscription](https://portal.azure.com) with sufficient credits to build the environment in: we recommend around $3,000 as a reasonable starting point. - - ```{tip} - - Ensure that the **Owner** of the subscription is an `Azure Security group` that contains all administrators and no-one else. - - We recommend using separate `Microsoft Entra IDs` for users and administrators - ``` - -- `PowerShell` - - We recommend [installing](https://docs.microsoft.com/en-us/powershell/scripting/install/installing-powershell) the [latest stable release](https://learn.microsoft.com/en-us/powershell/scripting/install/powershell-support-lifecycle?view=powershell-7.4) of Powershell. We have most recently tested deployment using version `7.4.1`. 
-- `Powershell` cross-platform modules - - ````{tip} - Check whether you are missing any required modules by running - ```powershell - PS> ./deployment/CheckRequirements.ps1 - ``` - Either manually install each missing module or install them all with - ```powershell - PS> ./deployment/CheckRequirements.ps1 -InstallMissing - ``` - ```` - -- `Microsoft Remote Desktop` - - ![macOS](https://img.shields.io/badge/-555?&logo=apple&logoColor=white) this can be installed from the [Apple store](https://www.apple.com/app-store/) - - ![Windows](https://img.shields.io/badge/-555?&logo=windows&logoColor=white) this can be [downloaded from Microsoft](https://apps.microsoft.com/store/detail/microsoft-remote-desktop/9WZDNCRFJ3PS) - - ![Linux](https://img.shields.io/badge/-555?&logo=linux&logoColor=white) use your favourite remote desktop client -- `OpenSSL` - - - ![macOS](https://img.shields.io/badge/-555?&logo=apple&logoColor=white) a pre-compiled version can be installed using Homebrew: `brew install openssl` - - ![Windows](https://img.shields.io/badge/-555?&logo=windows&logoColor=white) binaries are [available here](https://wiki.openssl.org/index.php/Binaries). - - ```{error} - If `Powershell` cannot detect `OpenSSL` you may need to explicitly add your `OpenSSL` installation to your `Powershell` path by running `$env:path = $env:path + ";` - ``` - - - ![Linux](https://img.shields.io/badge/-555?&logo=linux&logoColor=white) use your favourite package manager or install manually following the [instructions on GitHub](https://github.com/openssl/openssl) - -- `Docker Hub` account - - The DSH makes use of several public Docker images. - Due to Docker Hub download [rate limits](https://docs.docker.com/docker-hub/download-rate-limit/), we now require Docker credentials to ensure that all images are successfully downloaded at the time of deployment. - - We recommend using a personal access token (PAT) with Public Repo Read-Only permissions rather than your Docker account password. - See [instructions on Docker](https://docs.docker.com/security/for-developers/access-tokens/) for details of how to create a PAT. - -````{hint} -If you run: - -```powershell -PS> Start-Transcript -Path -``` - -before you start your deployment and - -```powershell -PS> Stop-Transcript -``` - -afterwards, you will automatically get a full log of the Powershell commands you have run. -```` - -## 2. {{clipboard}} Safe Haven Management configuration - -(roles_deployer_shm_id)= - -### Management environment ID - -```{important} -Choose a short ID `` to identify the management environment (e.g. `project`). -This can have a maximum of **seven alphanumeric characters**. -``` - -(roles_system_deployer_shm_configuration_file)= - -### Create configuration file - -The core properties for the Safe Haven Management (SHM) environment must be defined in a JSON file named `shm__core_config.json` in the `environment_configs` folder. -The following core SHM properties are required - look in the `environment_configs` folder to see some examples. - -```json -{ - "name": "Name of this Safe Haven (e.g. 'Turing Production Safe Haven').", - "shmId": "The that you decided on above (e.g. 'project').", - "domain": "The fully qualified domain name for the management environment (e.g. 'project.turingsafehaven.ac.uk')", - "timezone": "[Optional] Timezone in IANA format (e.g. 
'Europe/London').", - "azure": { - "adminGroupName": "Azure Security Group that admins of this Safe Haven will belong to (see below for details).", - "activeDirectoryTenantId": "Tenant ID for the Microsoft Entra ID containing users (see below for details on how to obtain this). Note that we preserve the Active Directory name here for compatability with earlier DSH versions.", - "location": "Azure location to deploy the management environment into (e.g. 'uksouth').", - "subscriptionName": "Azure subscription to deploy the management environment into." - }, - "organisation": { - "name": "Name of your organisation, used when generating SSL certificates (e.g. 'The Alan Turing Institute')", - "townCity": "Town where your organisation is located, used when generating SSL certificates (e.g. 'London')", - "stateCountyRegion": "Region where your organisation is located, used when generating SSL certificates (e.g. 'London')", - "countryCode": "Country where your organisation is located, used when generating SSL certificates (e.g. 'GB')", - "contactEmail": "Email address at your organisation that will receive notifications when SSL certificates are about to expire." - }, - "dnsRecords": { - "subscriptionName": "[Optional] Azure subscription which holds DNS records (if not specified then the value from the 'azure' block will be used).", - "resourceGroupName": "[Optional] Resource group which holds DNS records (e.g. RG_SHM_DNS_TEST)." - }, - "repositoryType": { - "tier2": "[Optional] Whether to use 'mirror' or 'proxy' for tier-2 repositories (default is 'proxy').", - "tier3": "[Optional] Whether to use 'mirror' or 'proxy' for tier-3 repositories (default is 'proxy')." - }, - "vmImages": { - "subscriptionName": "[Optional] Azure subscription where VM images will be built (if not specified then the value from the 'azure' block will be used). Multiple Safe Haven deployments can share a single set of VM images in a common subscription if desired - this is what is done in the Turing deployment. If you are hoping to use images that have already been built for another Safe Haven deployment, make sure you specify this parameter accordingly.", - "location": "[Optional] Azure location where VM images should be built (if not specified then the value from the 'azure' block will be used). Multiple Safe Haven deployments can share a single set of VM images in a common subscription if desired - this is what is done in the Turing deployment. If you are hoping to use images that have already been built for another Safe Haven deployment, make sure you specify this parameter accordingly.", - "buildIpAddresses": "[Optional] One or more IP addresses which admins will be running the VM build scripts from (if not specified then Turing IP addresses will be used)." - }, - "docker": { - "account": "A Docker Hub account name.", - "password": "The password or personal access token for the above account. We strongly recommend using a Personal Access Token with permissions set to Public Repo Read-only" - }, - "overrides": "[Optional, Advanced] Do not use this unless you know what you're doing! If you want to override any of the default settings, you can do so by creating the same JSON structure that would be found in the final config file and nesting it under this entry. For example, to change the size of the data disk on the domain controller, you could use something like: 'shm: { dc: { disks: { data: { sizeGb: 50 } } } }'" -} -``` - -```{note} -- This configuration file is also used when deploying an SRE environment. 
-- We recommend that you set the fully qualified domain name to `.`. -- This may require purchasing a dedicated domain so follow your organisation's guidance. -- You must ensure that the group specifed in `azure.adminGroupName` exists in the Microsoft Entra ID for the tenant that you will be deploying into. Depending on your setup, this may be different from the Microsoft Entra ID where your users are created. -``` - -```{admonition} Alan Turing Institute default -- **production** uses `.turingsafehaven.ac.uk` -- **development** uses `.dsgroupdev.co.uk` -``` - -### (Optional) Verify code version - -If you have cloned/forked the code from our `GitHub` repository, you can confirm which version of the Data Safe Haven you are currently using by running the following commands: - -![Powershell: a few seconds](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=a%20few%20seconds) - -```powershell -PS> git tag --list | Select-String $(git describe --tags) -``` - -This will check the tag you are using against the list of known tags and print it out. -You can include this confirmation in any record you keep of your deployment. - -### (Optional) View full SHM configuration - -A full configuration, which will be used in subsequent steps, will be automatically generated from your core configuration. -Should you wish to, you can print the full SHM config by running the following Powershell command: - -![Powershell: a few seconds](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=a%20few%20seconds) at {{file_folder}} `./deployment` - -```powershell -PS> ./ShowConfigFile.ps1 -shmId -``` - -- where `` is the {ref}`management environment ID ` for this SHM - -(roles_deployer_setup_aad)= - -## 3. {{file_folder}} Setup Microsoft Entra ID - -```{warning} -If you wish to reuse an existing Microsoft Entra ID please make sure you remove any existing `Conditional Access Policies` by going to `Security > Conditional Access > Policies` and manually removing the `Restrict Microsoft Entra ID access` and `Require MFA` policies. -You can then continue to the next step: {ref}`getting the Microsoft Entra tenant ID `. -``` - -### Create a new Microsoft Entra ID - -![Portal: one minute](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-azure&label=portal&color=blue&message=one%20minute) - -- From the Azure portal, click `Create a Resource` and search for `Microsoft Entra ID` -
Screenshots - - ```{image} deploy_shm/AAD.png - :alt: Microsoft Entra ID - :align: center - ``` - -
- -- Click `Create` -- Set the `Organisation Name` to the value of `` in your core configuration file (e.g. `Turing Production Safe Haven`) - - Note: be careful not to confuse this with the `` under `` used in the config file -- Set the `Initial Domain Name` to the `Organisation Name` all lower case with spaces removed (e.g. `turingproductionsafehaven`) -- Set the `Country or Region` to whatever region is appropriate for your deployment (e.g. `United Kingdom`) -
Screenshots - - ```{image} deploy_shm/aad_creation.png - :alt: Microsoft Entra ID creation - :align: center - ``` - -
- -- Click `Create` -- Wait for Microsoft Entra ID to be created - -(roles_deployer_aad_tenant_id)= - -### Get the Microsoft Entra Tenant ID - -![Microsoft Entra ID: one minute](https://img.shields.io/badge/Microsoft_Entra_ID-One_minute-blue?logo=microsoft-academic) - -- From the Azure portal, navigate to the Microsoft Entra ID you have created. - You can do this by: - - Clicking the link displayed at the end of the initial Microsoft Entra ID deployment. - - Clicking on your username and profile icon at the top left of the Azure portal, clicking `Switch directory` and selecting the Microsoft Entra ID you have just created from the `All Directories` section of the `Directory + Subscription` panel that then displays. -- If required, click the "hamburger" menu in the top left corner (three horizontal lines) and select `Microsoft Entra ID` -- Click `Overview` in the left panel and copy the `Tenant ID` displayed under the Microsoft Entra ID name and initial `something.onmicrosoft.com` domain. -
Screenshots - - ```{image} deploy_shm/aad_tenant_id.png - :alt: Microsoft Entra tenant ID - :align: center - ``` - -
- -- Ensure that you add this to the {ref}`configuration file ` for this SHM under `azure > activeDirectoryTenantId`. - -(roles_deployer_shm_configure_dns)= - -## 4. {{door}} Register custom domain with Microsoft Entra ID - -### Configure DNS for the custom domain - -![Powershell: a few minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=a%20few%20minutes) at {{file_folder}} `./deployment/safe_haven_management_environment/setup` - -```powershell -PS> ./Setup_SHM_DNS_Zone.ps1 -shmId -``` - -- where `` is the {ref}`management environment ID ` for this SHM - -````{error} -If you see a message `You need to add the following NS records to the parent DNS system for...` you will need to add the NS records manually to the parent's DNS system, as follows: - -
Manual DNS configuration instructions - -- To find the required values for the NS records on the portal, click `All resources` in the far left panel, search for `DNS Zone` and locate the DNS Zone with the SHM's domain. -- The NS record will list four Azure name servers which must be duplicated to the parent DNS system. -- If the parent domain has an Azure DNS Zone, create an NS record set in this zone. - - The name should be set to the subdomain (e.g. `project`) or `@` if using a custom domain, and the values duplicated from above - - For example, for a new subdomain `project.turingsafehaven.ac.uk`, duplicate the NS records from the Azure DNS Zone `project.turingsafehaven.ac.uk` to the Azure DNS Zone for `turingsafehaven.ac.uk`, by creating a record set with name `project` - ```{image} deploy_shm/shm_subdomain_ns.png - :alt: Subdomain NS record - :align: center - ``` -- If the parent domain is outside of Azure, create NS records in the registrar for the new domain with the same value as the NS records in the new Azure DNS Zone for the domain. -
-```` - -### Add the SHM domain to the Microsoft Entra ID - -![Powershell: a few minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=a%20few%20minutes) at {{file_folder}} `./deployment/safe_haven_management_environment/setup` - -```powershell -PS> ./Setup_SHM_AAD_Domain.ps1 -shmId -``` - -- where `` is the {ref}`management environment ID ` for this SHM - -```{error} -If you get an error like `Could not load file or assembly 'Microsoft.IdentityModel.Clients.ActiveDirectory, Version=3.19.8.16603, Culture=neutral PublicKeyToken=31bf3856ad364e35'. Could not find or load a specific file. (0x80131621)` then you may need to try again in a fresh `Powershell` terminal. -``` - -```{error} -Due to delays with DNS propagation, the script may occasionally exhaust the maximum number of retries without managing to verify the domain. -If this occurs, run the script again. -If it exhausts the number of retries a second time, wait an hour and try again. -``` - -(roles_deploy_add_additional_admins)= - -## 5. {{hammer}} Create Microsoft Entra administrator accounts - -A default external administrator account was automatically created for the user you were logged in as when you initially created the Microsoft Entra ID. -This user should also **not be used** for administering the Microsoft Entra ID. - -Several later steps will require the use of a **native** administrator account with a valid mobile phone and email address. -You must therefore create and activate a **native** administrator account for each person who will be acting as a system administrator. -After doing so, you can delete the default external user - we strongly recommend that you do so. - -```{tip} -In order to avoid being a single point of failure, we strongly recommend that you add other administrators in addition to yourself. -``` - -```{caution} -An emergency access admin account is created later in the deployment process. -This should not be used except when **absolute necessary**. -In particular, it should not be used as a shared admin account for routine administration of the Safe Haven. -``` - -### Create a new account for each administrator (including yourself) - -![Microsoft Entra ID: a few minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-academic&label=Microsoft%20Entra%20ID&color=blue&message=a%20few%20minutes) - -- From the Azure portal, navigate to the Microsoft Entra ID you have created. -- Click `Users` in the left hand sidebar and click on the `+New user` icon in the top menu above the list of users. - -#### Create an internal admin user: - -- User name: `aad.admin.firstname.lastname@` -- Name: `AAD Admin - Firstname Lastname` -- Leave `Auto-generate password` set. Users will be able to reset their passwords on first login and it is good security practice for admins not to know user passwords. -- Click the `User` link in the `Roles` field and make the user an administrator: - - Search for `Global Administrator` - - Check `Global Administrator` - - Click the `Select` button -- Set their usage location to the country you used when creating the Safe Haven Microsoft Entra ID -- Leave all other fields empty, including First name and Last name -- Click `Create` - -```{image} deploy_shm/aad_create_admin.png -:alt: AAD create admin account -:align: center -``` - -#### Add authentication methods for self-service password reset - -- Navigate to `Users` and click on the account you have just created. 
-- Click on `Properties` and then edit the `Contact info` section. - - Add the the user's mobile phone number to the `Mobile phone` field. - Make sure to prefix it with the country code and **do not include** the leading zero (e.g. `+44 7700900000`). - - They will need to enter their number in **exactly this format** when performing a self-service password reset. - - Do **not** add anything in the `Email` field here as this will prevent you from using the same email address for a user account. - - Click the `Save` icon in top panel. -- In the left-hand sidebar click `Authentication methods`. - - Enter the user's mobile phone number in the `Phone` field, using the same format as above. - - Note that you do **not** need to fill out the `Alternate Phone` field. - - Enter the user's institutional email address in the `Email` field. - - Ensure that you have registered **both** a phone number and an email address. - - Click the `Save` icon in top panel. - -### Register allowed authentication methods - -When you have finished creating administrator accounts, you will need to ensure that they are able to set their own passwords - -- From the Azure portal, navigate to the Microsoft Entra ID you have created. -- Click `Manage > Password Reset` on the left-hand sidebar -- Click `Manage > Authentication methods` on the left-hand sidebar -- Ensure that both `Email` and `Mobile phone` are enabled - -```{image} deploy_shm/aad_authentication_methods.png -:alt: AAD create admin account -:align: center -``` - -### Activate and configure your new internal admin account - -```{warning} -In the next step we will delete the external admin account created for the user account you used to create the Microsoft Entra ID. -Before you do this, you **must** configure and log into the **native** admin account you have just created for yourself. -``` - -- The `` for this account is `aad.admin.firstname.lastname` -- The `` for this account is the same as the `` - -The administrators you have just set up can activate their accounts by following the password and MFA steps in the {ref}`user guide `. - -### Remove the default external user that was used to create the Microsoft Entra ID - -```{warning} -Make sure you have activated your account and **successfully logged in** with the new **native** administrator account you have just created for yourself (`aad.admin.firstname.lastname@`) before deleting the default external administrator account. -``` - -![Microsoft Entra ID: a few minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-academic&label=Microsoft%20Entra%20ID&color=blue&message=a%20few%20minutes) - -- Ensure you are logged in with the new **native** administrator account you have just created. - - Click on your username at the top right corner of the screen, then `Sign in with a different user`. - - Log in with the password you set for yourself when activating your admin account in the previous step -- From the Azure portal, navigate to the Microsoft Entra ID you have created. -- Click `Users` in the left hand sidebar -- Select the default **external** user that was created when you created the Microsoft Entra ID. - - The `User principal name` field for this user will contain the **external domain** and will have `#EXT#` before the `@` sign (for example `alovelace_turing.ac.uk#EXT#@turingsafehaven.onmicrosoft.com`) -- Click the `Delete user` icon in the menu bar at the top of the user list panel - -(roles_deployer_deploy_shm)= - -## 6. 
{{computer}} Deploy SHM - -![Powershell: a few hours](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=a%20few%20hours) at {{file_folder}} `./deployment/safe_haven_management_environment/setup` - -```powershell -PS> ./Deploy_SHM.ps1 -shmId -``` - -- where `` is the {ref}`management environment ID ` for this SHM -- where `` is the {ref}`secure research environment ID ` for this SRE - -You will be prompted for credentials for: - -- a user with admin rights over the Azure subscriptions you plan to deploy into -- a user with Global Administrator privileges over the Microsoft Entra ID you set up earlier - -This will perform the following actions, which can be run individually if desired: - -(roles_deployer_shm_key_vault)= - -
-Deploy Key Vault for SHM secrets and create an emergency admin account - -![Powershell: ten minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=ten%20minutes) at {{file_folder}} `./deployment/safe_haven_management_environment/setup` - -```powershell -PS> ./Setup_SHM_Key_Vault_And_Emergency_Admin.ps1 -shmId -``` - -- where `` is the {ref}`management environment ID ` for this SHM - -```{error} -If you get an error like `Could not load file or assembly 'Microsoft.IdentityModel.Clients.ActiveDirectory, Version=3.19.8.16603, Culture=neutral PublicKeyToken=31bf3856ad364e35'. Could not find or load a specific file. (0x80131621)` then you may need to try again in a fresh `Powershell` terminal. -``` - -Some (rare) operations require you to be logged in as a **native** Global Administrator. -To support these rare cases, and to allow access to the Safe Haven Microsoft Entra ID in the case of loss of access to personal administrator accounts (e.g. lost access to MFA), an **emergency access** administrator account has been created by the above script. - -```{warning} -Do not use this account unless absolutely required! -``` - -
- -(roles_deployer_shm_vnet_gateway)= - -
-Deploy network and VPN gateway - -![Powershell: twenty minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=twenty%20minutes) at {{file_folder}} `./deployment/safe_haven_management_environment/setup` - -```powershell -PS> ./Setup_SHM_Networking.ps1 -shmId -``` - -- where `` is the {ref}`management environment ID ` for this SHM - -Sanity check - -![Portal: one minute](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-azure&label=portal&color=blue&message=one%20minute) - -Once the script exits successfully you should see the following resource groups in the Azure Portal under the SHM subscription, with the appropriate `` for your deployment e.g. `RG_SHM__NETWORKING`: - -```{image} deploy_shm/vnet_resource_groups.png -:alt: Resource groups -:align: center -``` +This is required to manage your Secure Research Environments (SREs). -```{error} -If you cannot see these resource groups: -- Ensure you are logged into the portal using the account that you are building the environment with. -- Click on your username in the top right corner of the Azure portal screen and ensure that your SHM subscription (see `shm__core_config.json`) is one of the selections. -- Click the "hamburger" menu in the top left corner (three horizontal lines) and select `Resource groups`. -``` +:::{important} +The SHM **must** be setup before any SREs can be deployed. +::: -
- -(roles_system_deployer_shm_deploy_logging)= - -
-Deploy monitoring - -![Powershell: thirty minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=thirty%20minutes) at {{file_folder}} `./deployment/safe_haven_management_environment/setup` - -```powershell -PS> ./Setup_SHM_Monitoring.ps1 -shmId -``` - -- where `` is the {ref}`management environment ID ` for this SHM - -
- -(roles_system_deployer_shm_deploy_firewall)= - -
-Deploy firewall - -![Powershell: ten minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=ten%20minutes) at {{file_folder}} `./deployment/safe_haven_management_environment/setup` - -```powershell -PS> ./Setup_SHM_Firewall.ps1 -shmId -``` - -- where `` is the {ref}`management environment ID ` for this SHM - -
- -(roles_deployer_shm_domain_controllers)= - -
-Deploy domain controllers - -![Powershell: one hour](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=one%20hour) at {{file_folder}} `./deployment/safe_haven_management_environment/setup` - -```powershell -PS> ./Setup_SHM_DC.ps1 -shmId -``` - -- where `` is the {ref}`management environment ID ` for this SHM - -Sanity check - -Once the script exits successfully you should see the following resource groups in the Azure Portal under the SHM subscription, with the appropriate `` for your deployment e.g. `RG_SHM__NETWORKING`: - -```{image} deploy_shm/dc_resource_groups.png -:alt: Resource groups -:align: center -``` - -```{error} -If you cannot see these resource groups: -- Ensure you are logged into the portal using the account that you are building the environment with. -- Click on your username in the top right corner of the Azure portal screen and ensure that your SHM subscription (see `shm__core_config.json`) is one of the selections. -- Click the "hamburger" menu in the top left corner (three horizontal lines) and select `Resource groups`. -``` - -
- -(roles_system_deployer_shm_deploy_update_servers)= - -
-Deploy update servers - -![Powershell: ten minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=ten%20minutes) at {{file_folder}} `./deployment/safe_haven_management_environment/setup` - -```powershell -PS> ./Setup_SHM_Update_Servers.ps1 -shmId -``` - -- where `` is the {ref}`management environment ID ` for this SHM - -
- -(roles_system_deployer_shm_deploy_mirrors)= - -
-Deploy local package repositories - -Two different types of local package repositories are available for {ref}`policy_tier_2` and {ref}`policy_tier_3` SREs: - -- **Proxy** (the repository makes on-demand connections to the external repository) -- **Mirror** (the repository full replicates all requested packages from the external repository) - -```{hint} -We **recommend** using Nexus proxies at both {ref}`policy_tier_2` and {ref}`policy_tier_3` to avoid the time taken to sync local mirrors. -``` - -We currently support the **PyPI** (Python) and **CRAN** (R) repositories. - -![Powershell: ten minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=ten%20minutes) at {{file_folder}} `./deployment/safe_haven_management_environment/setup` - -```powershell -PS> ./Setup_SHM_Package_Repositories.ps1 -shmId -``` - -- where `` is the {ref}`management environment ID ` for this SHM - -```{danger} -You should never attempt to manage the Nexus proxy through the web interface. -Doing so from outside the Nexus subnet could expose the admin credentials. -``` - -```{warning} -Note that a full set of {ref}`policy_tier_2` local mirrors currently take around **two weeks** to fully synchronise with the external package repositories as PyPI contains >10TB of packages. -``` - -
- -(deploy_shm_vpn)= - -## 7. {{station}} Configure VPN connection - -### Download a client VPN certificate for the Safe Haven Management network - -![Portal: one minute](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-azure&label=portal&color=blue&message=one%20minute) - -- Navigate to the SHM Key Vault via `Resource Groups > RG_SHM__SECRETS > kv-shm-` -- Once there open the `Certificates` page under the `Settings` section in the left hand sidebar. -- Click on the certificate named `shm--vpn-client-cert` and select the `CURRENT VERSION` -- Click the `Download in PFX/PEM format` link at the top of the page and save the `*.pfx` certificate file locally -- To install, double click on the downloaded certificate (or on macOS you can manually drag it into the `login` keychain), leaving the password field blank. - -**Make sure to securely delete the local "\*.pfx" certificate file that you downloaded after you have installed it.** - -### Setup VPN connection to the Safe Haven Management network - -![Portal: one minute](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-azure&label=portal&color=blue&message=one%20minute) - -- Navigate to the Safe Haven Management (SHM) virtual network gateway in the SHM subscription via `Resource Groups > RG_SHM__NETWORKING > VNET_SHM__GW` -- Once there open the `Point-to-site configuration` page under the `Settings` section in the left hand sidebar -- Click the `Download VPN client` link at the top of the page to download a zip file -
Screenshots - - ```{image} deploy_shm/certificate_details.png - :alt: Certificate details - :align: center - ``` - -
- -- Unzip the zip file and identify the root certificate (`Generic\VpnServerRoot.cer`) and VPN configuration file (`Generic\VpnSettings.xml`) -- Follow the [VPN set up instructions](https://web.archive.org/web/20240527120727/https://learn.microsoft.com/en-us/azure/vpn-gateway/point-to-site-vpn-client-cert-windows) using the section appropriate to your operating system (**you do not need to install the `Generic\VpnServerRoot.cer` certificate, as we're using our own self-signed root certificate**): - -```{admonition} ![Windows](https://img.shields.io/badge/-555?&logo=windows&logoColor=white) instructions -- Use SSTP for the VPN type -- Name the VPN connection `Safe Haven Management Gateway ()` -- **Do not** rename the VPN client as this will break it -``` - -````{admonition} ![macOS](https://img.shields.io/badge/-555?&logo=apple&logoColor=white) instructions -- Start from "Configure VPN client profile" step of the `macOS` instructions. -- Use IKEv2 for the VPN type -
For users of macOS Catalina or later - - You must select `None` from the drop-down (not `Certificate`) and then select the `Certificate` radio button underneath as shown in the image below. - ```{image} deploy_shm/catalina_authentication.png - :alt: Certificate details - :align: center - ``` -
-- Name the VPN connection `Safe Haven Management Gateway ()` -- You can view the details of the downloaded certificate by highlighting the certificate file in Finder and pressing the spacebar. -- You can then look for the certificate of the same name in the login KeyChain and view its details by double clicking the list entry. -- If the details match the certificate has been successfully installed. -```` - -You should now be able to connect to the SHM virtual network via the VPN. - -```{important} -Each time you need to access the virtual network ensure you are connected via the VPN. -``` - -```{error} -![Windows](https://img.shields.io/badge/-555?&logo=windows&logoColor=white) You may get a `Windows protected your PC` pop up. -If so, click `More info -> Run anyway`. -``` - -```{error} -![Windows](https://img.shields.io/badge/-555?&logo=windows&logoColor=white) You may encounter a further warning along the lines of `Windows cannot access the specified device, path, or file`. -This may mean that your antivirus is blocking the VPN client. -You will need to configure your antivirus software to make an exception. -``` - -(roles_system_deployer_configure_domain_controllers)= - -## 8. {{house_with_garden}} Configure domain controllers - -(roles_system_deployer_shm_remote_desktop)= - -### Configure the first domain controller via Remote Desktop - -![Portal: one minute](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-azure&label=portal&color=blue&message=one%20minute) - -- Navigate to the **SHM primary domain controller** VM in the portal at `Resource Groups > RG_SHM__DC > DC1-SHM-` and note the `Private IP address` for this VM -- Next, navigate to the `RG_SHM__SECRETS` resource group and then the `kv-shm-` Key Vault and then select `secrets` on the left hand panel and retrieve the following: - - `` is in the `shm--domain-admin-username` secret. - - `` is the `` followed by the SHM AD domain: `@`. - - `` is in the `shm--domain-admin-password` secret. - -```{danger} -- These domain administrator credentials have complete control over creating and deleting users as well as assigning them to groups. -- Do not use them except where specified and never write them down! -- Be particularly careful never to use them to log in to any user-accessible VMs (such as the secure research desktops). -``` - -(roles_deployer_shm_aad_connect)= - -#### Install Microsoft Entra Connect - -![Remote: ten minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-onedrive&label=remote&color=blue&message=ten%20minutes) - -````{include} ../roles/system_manager/snippets/02_ms_entra_connect.partial.md -:relative-images: -```` - -- Log into the **SHM primary domain controller** (`DC1-SHM-`) VM using the `private IP address`, `` and `` that you {ref}`obtained from the portal above `. 
-- Navigate to `C:\Installation` -- Run the `AzureADConnect` Windows Installer Package - - On the `Welcome to Azure AD Connect` screen: - - Tick the `I agree to the license terms` box - - Click `Continue` - - On the `Express Settings` screen: - - Click `Customize` - - On the `Install required components` screen: - - Click `Install` - - On the `User sign-in` screen: - - Ensure that `Password Hash Synchronization` is selected - - Click `Next` - - On the `Connect to Azure AD` screen: - - Provide credentials for the Azure Active Directory **global administrator** account you set up earlier (`aad.admin..@`) when prompted - - If you receive a pop-up prompt, provide the same credentials when prompted - - Back on the `Connect to Azure AD` screen, click `Next` - - Approve the login with MFA if required - - On the `Connect your directories` screen: - - Ensure that correct forest (your custom domain name; e.g `turingsafehaven.ac.uk`) is selected and click `Add Directory` - - On the `AD forest account` pop-up: - - Select `Use existing AD account` - - Enter the details for the `localadsync` user. - - **Username**: use the value of the `shm--aad-localsync-username` secret in the SHM key vault: - - EITHER prepended with `\`, where the `Domain ID` is the capitalised form of the ``, so if the _SHM ID_ is `project` and the _username_ is `projectlocaladsync` then you would use `PROJECT\projectlocaladsync` here. - - OR suffixed with ``, so if the _SHM domain_ is `project.turingsafehaven.ac.uk` and the _username_ is `projectlocaladsync` then you would use `projectlocaladsync@project.turingsafehaven.ac.uk` here. - - **Password**: use the `shm--aad-localsync-password` secret in the SHM key vault. - - Click `OK` - - Click `Next` - - On the `Azure AD sign-in configuration` screen: - - Verify that the `User Principal Name` is set to `userPrincipalName` - - Click `Next` - - On the `Domain and OU filtering` screen: - - Select `Sync Selected domains and OUs` - - Expand the domain and deselect all objects - - Select `Safe Haven Research Users` and `Safe Haven Security Groups` - - Click `Next` - - On the `Uniquely identifying your users` screen: - - Click `Next` - - On the `Filter users and devices` screen: - - Select `Synchronize all users and devices` - - Click `Next` - - On the `Optional features` screen: - - Select `Password Writeback` - - Click `Next` - - On the `Ready to configure` screen: - - Ensure that the `Start the synchronisation process when configuration completes` option is ticked. - - Click `Install` - - This may take a few minutes to complete - - On the `Configuration complete` screen: - - Click `Exit` - -```{note} -Take care to consider any differences in the keyboard of your machine and the Windows remote desktop when entering any usernames or passwords. -``` - -```{error} -If you receive an Internet Explorer pop-up dialog `Content within this application coming from the website below is being blocked by Internet Explorer Advanced Security Configuration` for Microsoft domains such as `https://login.microsoft.com` or `https://aadcdn.msftauth.net` then you can safely add these as exceptions: -- Click `Add` -- Click `Close` -``` - -```{error} -If you receive an error message on the login webpage pop-ups saying `We can't sign you in. 
-Javascript is required to sign you in....` followed by the Script Error: `Do you want to continue running scripts on this page` you can safely allow Javascript: -- Click `Yes` -- Close the dialog by clicking `X` -``` - -```{error} -If you see a Windows Security Warning, related to the MFA login: -- Check `Don't show this message again` -- Click `Yes` to close the dialog. -``` - -```{error} -If you get an error that the username/password is incorrect or that the domain/directory could not be found when entering the details for the `localadsync` user, try resetting the password for this user in the **Domain Controller** Active Directory so that it matches the value stored in the Key Vault -- In Server Manager click `Tools > Active Directory Users and Computers` -- Expand the domain in the left hand panel -- Expand the `Safe Haven Service Accounts` OU -- Right click on the ` Local AD Sync Administrator` user and select `reset password` -- Set the password to the value from the appropriate Key Vault secret. -- Leave the other settings alone and click `OK` -``` - -```{error} -If you have recently torn down another SHM linked to the same Microsoft Entra ID you might see the error `Directory synchronization is currently in a pending disabled state for this directory. Please wait until directory synchronization has been fully disabled before trying again`. -You need to wait for the `Microsoft Entra ID` to fully disconnect - this can take up to 72 hours but is typically sooner. -You do not need to close the installer window while waiting. -If you need to, you can disconnect from the DC and VPN and reconnect later before clicking `Retry`. -``` - -```{error} -If you get an error that the connection to Azure Active Directory could not be made, please check that you do not have any Conditional Access policies enabled on the Azure Active Directory that require MFA for the synchronisation account. -``` - -(roles_system_deployer_shm_aad_connect_rules)= - -#### Update Azure Active Directory Connect rules - -This step allows the locale (country code) to be pushed from the local AD to Microsoft Entra ID. - -![Remote: one minute](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-onedrive&label=remote&color=blue&message=one%20minute) - -- Log into the **SHM primary domain controller** (`DC1-SHM-`) VM using the `private IP address`, `` and `` that you {ref}`obtained from the portal above `. -- Run the following command on the remote domain controller VM to update the Microsoft Entra rules - -```powershell -PS> C:\Installation\UpdateAADSyncRule.ps1 -``` - -(roles_system_deployer_shm_validate_aad_synchronisation)= - -### Validate Active Directory synchronisation - -This step validates that your local Active Directory users are correctly synchronised to Microsoft Entra ID. -Note that you can use the same script after deploying an SRE to add users in bulk. - -![Remote: one minute](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-onedrive&label=remote&color=blue&message=one%20minute) - -- Log into the **SHM primary domain controller** (`DC1-SHM-`) VM using the `private IP address`, `` and `` that you {ref}`obtained from the portal above `. -- Add your details to create researcher accounts for yourself and any other deployers. 
- -```{include} snippets/user_csv_format.partial.md -:relative-images: -``` - -- Run the following command on the remote domain controller VM to create and synchronise the users - -```powershell -PS> C:\Installation\CreateUsers.ps1 -``` - -- This script will add the users and trigger a sync with Microsoft Entra ID -- Wait a few minutes for the changes to propagate - -![Microsoft Entra ID: a few seconds](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-academic&label=Microsoft%20Entra%20ID&color=blue&message=a%20few%20seconds) - -- Click `Users > All users` and confirm that the new user is shown in the user list. -- The new user account should have the `On-premises sync enabled` field set to `Yes` - -```{error} -If you get the message `New-ADUser: The specified account already exists` you should first check to see whether that user actually does already exist! -Once you're certain that you're adding a new user, make sure that the following fields are unique across all users in the Active Directory. - -- `SamAccountName`: Specified explicitly in the CSV file. - - If this is already in use, consider something like `firstname.middle.initials.lastname` -- `DistinguishedName`: Formed of `CN=,` by Active directory on user creation. - - If this is in use, consider changing `DisplayName` from ` ` to ` `. -``` - -### Configure AAD side of AD connect - -![Microsoft Entra ID: one minute](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-academic&label=Microsoft%20Entra%20ID&color=blue&message=one%20minute) - -- From the Azure portal, navigate to the Microsoft Entra ID you have created. -- Select `Password reset` from the left hand menu -- Select `On-premises integration` from the left hand side bar - - - Ensure `Enable password writeback for synced users` is ticked. - - ```{image} deploy_shm/enable_password_writeback.png - :alt: Enable password writeback - :align: center - ``` - - - If you changed this setting, click the `Save` icon - -## 9. {{iphone}} Enable MFA and self-service password reset - -To enable self-service password reset (SSPR) and MFA-via-phone-call, you must have sufficient licences for all users. - -### Add licences that support self-service password reset - -![Microsoft Entra ID: a few minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-academic&label=Microsoft%20Entra%20ID&color=blue&message=a%20few%20minutes) - -Click the heading that applies to you to expand the instructions for that scenario. - -
Test deployments - -**For testing** you can enable a free trial of the P2 License (NB. it can take a while for these to appear on your Microsoft Entra ID). -You can activate the trial while logged in as your deafult guest administrator account. - -- From the Azure portal, navigate to the Microsoft Entra ID you have created. -- Click on `Licences` in the left hand sidebar -- Click on `All products` in the left hand sidebar -- Click on the `+Try/Buy` text above the empty product list and add a suitable licence product. - - Expand the `Free trial` arrow under `Microsoft Entra ID P2` - - Click the `Activate` button - - Wait for the `Microsoft Entra ID P2` licence to appear on the list of `All Products` (this could take several minutes) - -
- -
Production deployments - -**For production** you should buy P1 licences. -This requires you to be logged in with an **native** Global Administrator account. -As activating self-service password reset requires active MFA licences, this is one of the rare occasions you will need to use the emergency access admin account. - -- Switch to the the **emergency administrator** account: - - Click on your username at the top right corner of the screen, then click "Sign in with a different account" - - Enter `aad.admin.emergency.access@` as the username - - Open a new browser tab and go to the [Azure Portal](https://portal.azure.com/) - - Change to the Microsoft Entra ID associated with the Safe Haven SHM subscription (e.g. an existing corporate Microsoft Entra ID). - Do this by clicking on your username at the top right corner of the screen, then `Switch directory`, then selecting the directory you wish to switch to. - - Click the "hamburger" menu in the top left corner (three horizontal lines) and select `Subscriptions` - - Click on the Safe Haven SHM subscription - - Click on `Resource Groups` in the left hand sidebar then `RG_SHM__SECRETS` - - Click on the `kv-shm-` Key Vault - - Click on `Secrets` in the left hand sidebar - - Click on the `shm--aad-emergency-admin-password` secret - - Click on the entry in the `Current version` section - - Click on the clipboard icon next to the `Secret value` field - - The emergency admin account password in now in your clipboard - - Switch back to the browser tab with the Azure login page - - Paste the password you copied from the Key Vault - - Click the `Sign in` button -- Click the `Purchase services` link in the information panel above the trial options. -- In the "Microsoft 365 Admin Centre" portal that opens: - - Expand the `Billing` section of the left hand side bar - - Click on `Purchase services` - - Scroll down the list of products and select `Microsoft Entra ID Premium P1` and click `Buy` - - Select `Pay monthly` - - Enter the number of licences required. - - Leave `automatically assign all of your users with no licences` checked - - Click `Check out now` - - Enter the address of the organisation running the Safe Haven on the next screen - - Click next and enter payment details when requested -- Switch back to your original administrator account - - Click on your username at the top right corner of the screen, then click "Sign in with a different account" - - Log in as the user you used to create the Safe Haven Microsoft Entra ID -
- -### Enable self-service password reset - -![Microsoft Entra ID: one minute](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-academic&label=Microsoft%20Entra%20ID&color=blue&message=one%20minute) - -- Ensure your Azure Portal session is using the new Safe Haven Management (SHM) Microsoft Entra ID. - The name of the current directory is under your username in the top right corner of the Azure portal screen. - To change directories click on your username at the top right corner of the screen, then `Switch directory`, then the name of the new SHM directory. -- Click the "hamburger" menu in the top left corner (three horizontal lines) and select `Microsoft Entra ID` -- Click `Password reset` in the left hand sidebar -- Set the `Self service password reset enabled` toggle to `All` - - ```{image} deploy_shm/aad_sspr.png - :alt: AAD self-service password reset - :align: center - ``` - -- Click the `Save` icon - -```{error} -If you see a message about buying licences, you may need to refresh the page for the password reset option to show. -``` - -### Configure MFA on Microsoft Entra ID - -![Microsoft Entra ID: a few minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-academic&label=Microsoft%20Entra%20ID&color=blue&message=a%20few%20minutes) +:::{note} +A single SHM can manage all your SREs. +However, you may choose to use multiple SHMs if, for example, you want to separate production and development environments. +::: -- From the Azure portal, navigate to the Microsoft Entra ID you have created. -- Click `Users` in the left hand sidebar -- Click the `Per-user MFA` icon in the top bar of the users list. -- Click on `Service settings` at the top of the panel -- Configure MFA as follows: +## Requirements - - In the `App passwords` section select `Do not allow users to create app passwords to sign in to non-browser apps` - - Ensure the `Verification options` are set as follows: - - **check** `Call to phone` and `Notification through mobile app` (`Call to phone` is not available with a trial P2 licence) - - **uncheck** `Text message to phone` and `Verification code from mobile app or hardware token` - - In `Remember multi-factor authentication` section - - ensure `Allow users to remember multi-factor authentication on devices they trust` is **unchecked** - - Click "Save" and close window -
Screenshots +- A [Microsoft Entra](https://learn.microsoft.com/en-us/entra/fundamentals/) tenant for managing your users + - An account with [Global Administrator](https://learn.microsoft.com/en-us/entra/global-secure-access/reference-role-based-permissions#global-administrator) privileges on this tenant +- An Azure subscription where you will deploy your infrastructure + - An account with at least [Contributor](https://learn.microsoft.com/en-us/azure/role-based-access-control/built-in-roles/general#contributor) permissions on this subscription - ```{image} deploy_shm/aad_mfa_settings.png - :alt: AAD MFA settings - :align: center - ``` +## Deployment -
+::::{admonition} Ensure you are using a hatch shell +:class: dropdown important -## 10. {{closed_lock_with_key}} Apply conditional access policies +You must use a `hatch` shell to run any `dsh` commands. +From the project base directory run: -(roles_system_deployer_shm_require_mfa)= +:::{code} shell +$ hatch shell +::: -### Require MFA for all users +This ensures that you are using the intended version of Data Safe Haven with the correct set of dependencies. +:::: -```{warning} -Before completing this step, **make sure you have confirmed you are able to successfully log in as the emergency access admin**, as this account will be the only one excluded from the MFA requirement. -``` +Before deploying the Safe Haven Management (SHM) infrastructure you need to decide on a few parameters: -![Microsoft Entra ID: a few minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-academic&label=Microsoft%20Entra%20ID&color=blue&message=a%20few%20minutes) +**entra_tenant_id** +: Tenant ID for the Entra ID used to manage TRE users -- From the Azure portal, navigate to the Microsoft Entra ID you have created. -- Click `Properties` in the left hand sidebar and **disable** security defaults as shown in the screenshot [here](https://docs.microsoft.com/en-us/azure/active-directory/fundamentals/concept-fundamentals-security-defaults) - - Select `NO` from `Enable Security defaults` - - Select `My organization is using Conditional Access` and hit the `Save` button -- Click `Security` in the left hand sidebar -- Click `Conditional access` in the left hand sidebar -- Click the `+New Policy` icon in the top bar above the (empty) policy list -- Create a new policy as follows: - - Set the name to `Require MFA` - - Under `Users or workload identities` set the `Users and groups` condition to: - - **Include**: Select `All users` - - **Exclude**: - - Check `Users and groups` - - Select the `Admin - EMERGENCY ACCESS` user - - Select all `On-Premises Directory Synchronization Service Account` users - - Click `Select` - - Under `Cloud apps or actions` select `Cloud apps` in the drop-down menu and set: - - **Include**: Select `All cloud apps` - - **Exclude**: Leave unchanged as `None` - - Leave the `Conditions` condition unchanged (all showing as `Not configured`) - - Set the `Grant` condition to: - - Check `Grant access` - - Check `Require multi-factor authentication` - - Click `Select` - - Leave the `Session` condition unchanged - - Under `Enable policy` select `On` - - Check `I understand that my account will be impacted by this policy. Proceed anyway.` - - Click the `Create` button + :::{admonition} How to find your Microsoft Entra Tenant ID + :class: dropdown hint -(roles_system_deployer_shm_block_portal_access)= + - Go to the [Microsoft Entra admin centre](https://entra.microsoft.com/) + - Click on your username / profile icon in the top right + - Click **{guilabel}`Switch directory`** in the dropdown menu + - Ensure that you have selected the directory you chose above + - Browse to **{menuselection}`Identity --> Overview`** from the menu on the left side. + - Take note of the `Tenant ID` -### Block portal access for normal users + ::: -Most users have no reason to access the Azure portal using the SHM tenant. -Therefore we will block access for all users other than Global Administrators. +**fqdn** +: Domain name that your TRE users will belong to. 
-![Microsoft Entra ID: a few minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-academic&label=Microsoft%20Entra%20ID&color=blue&message=a%20few%20minutes) + :::{hint} + Use a domain that you own! If you use _e.g._ `bakerst.london` here your users will be given usernames like `sherlock.holmes@bakerst.london` + ::: -- From the Azure portal, navigate to the Microsoft Entra ID you have created. -- Click `Security` in the left hand sidebar -- Click `Conditional Access` in the left hand sidebar -- Click on `New Policy` at the top of the panel -- Configure the policy as follows - - In the `Name` field enter `Restrict Microsoft Entra ID access` - - Under `Users or workload identities` set the `Users and groups` condition to: - - **Include**: Select `All users` - - **Exclude**: - - Check `Directory roles` - - In the drop-down menu select `Global administrator`. - This will ensure that only the administrator accounts you created in {ref}`the previous section ` are able to access the portal. - - Under `Cloud apps or actions` select `Cloud apps` in the drop-down menu and set: - - **Include**: - - Select `Select apps` - - In the pop-up menu on the right, select - - `Microsoft Azure Management` and - - `Microsoft Graph PowerShell` then - - Click `Select` - - **Exclude**: Leave unchanged as `None` - - Leave the `Conditions` condition unchanged (all showing as `Not configured`) - - Under the `Access controls` and `Grant` Headings click `0 controls selected` - - In the pop-up menu on the right select the `Block Access` radio button and click `Select` - - Under `Enable policy` select `On` - - Click the `Create` button +**location** +: Azure location where you want your resources deployed. -```{error} -Security defaults must be disabled in order to create this policy. -This should have been done when creating a policy to {ref}`require MFA for all users `. -``` + :::{hint} + Use the short name without spaces, _e.g._ **uksouth** not **UK South** + ::: -## 11. {{no_pedestrians}} Add MFA licences to any non-admin users +Once you've decided on these, run the following command: [approx 5 minutes]: -Administrator accounts can use MFA and reset their passwords without a licence needing to be assigned. -However, when you create non-admin users they will need to be assigned an Azure Active Directory licence in order to reset their own password. +:::{code} shell +$ dsh shm deploy --entra-tenant-id YOUR_ENTRA_TENANT_ID \ + --fqdn YOUR_DOMAIN_NAME \ + --location YOUR_LOCATION +::: -### Assigning MFA licences +:::{note} +You will be prompted to log in to the Azure CLI and to the Graph API. -![Microsoft Entra ID: a few minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-academic&label=Microsoft%20Entra%20ID&color=blue&message=a%20few%20minutes) +- Azure CLI: use your **infrastructure** user credentials +- Graph API: use your **Entra tenant** administrator credentials -- Ensure you are logged in to the Azure Portal in with the **native** administrator account you created. -- Ensure your session is using the new Safe Haven Management (SHM) Microsoft Entra ID. - The name of the current directory is under your username in the top right corner of the Azure portal screen. - To change directories click on your username at the top right corner of the screen, then `Switch directory`, then the name of the new SHM directory. 
-- Click the "hamburger" menu in the top left corner (three horizontal lines) and select `Microsoft Entra ID` -- Click `Licences` in the left hand sidebar -- Click `All products` in the left hand sidebar -- Click the relevant licence product [`Microsoft Entra ID P1` (production) or `Microsoft Entra ID P2` (test)] -- Click `Licensed users` in the left hand sidebar -- Click the `+Assign` icon in the top bar above the list of user licence assignments -- Click `+ Add users and groups` under `Users and groups` -- Click on the users you want to assign licences to -- Click `Select` -- Click `Review + Assign` -- Click `Assign` +::: - +:::{important} +You may be asked to delegate your domain name to Azure. To do this, you'll need to know details about the parent domain. For example, if you are deploying to `dsh.example.com` then the parent name is `example.com`. -#### Testing password self-reset +- Follow [this tutorial](https://learn.microsoft.com/en-us/azure/dns/dns-delegate-domain-azure-dns#delegate-the-domain) if the parent domain is hosted **outside Azure** +- Follow [this tutorial](https://learn.microsoft.com/en-us/azure/dns/tutorial-public-dns-zones-child#verify-the-child-dns-zone) if the parent domain is hosted **in Azure** -- Add a licence to the user you want to test with -- Go to [https://aka.ms/mfasetup](https://aka.ms/mfasetup) in an **incognito / private browsing** tab -- Enter the researcher username -- Click the `Forgotten my password` link -- Enter the captcha text and press next -- Enter your mobile phone number, making sure to prefix it with the country code and to **not include** the leading zero (`+ ` e.g. `+44 7700900000`). -- Enter the code that was texted to your phone -- Enter a new password -- Click the `Sign in with new password` link on the following page, or go to [https://aka.ms/mfasetup](https://aka.ms/mfasetup) again -- Enter the username and the new password -- Click `Next` at the `Help us to protect your account` prompt -- Follow the instructions to configure `Microsoft Authenticator` +::: diff --git a/docs/source/deployment/deploy_shm/AAD.png b/docs/source/deployment/deploy_shm/AAD.png deleted file mode 100644 index 7a4aaf4f73..0000000000 Binary files a/docs/source/deployment/deploy_shm/AAD.png and /dev/null differ diff --git a/docs/source/deployment/deploy_shm/aad_authentication_methods.png b/docs/source/deployment/deploy_shm/aad_authentication_methods.png deleted file mode 100644 index c893b57690..0000000000 Binary files a/docs/source/deployment/deploy_shm/aad_authentication_methods.png and /dev/null differ diff --git a/docs/source/deployment/deploy_shm/aad_create_admin.png b/docs/source/deployment/deploy_shm/aad_create_admin.png deleted file mode 100644 index e6f94585db..0000000000 Binary files a/docs/source/deployment/deploy_shm/aad_create_admin.png and /dev/null differ diff --git a/docs/source/deployment/deploy_shm/aad_creation.png b/docs/source/deployment/deploy_shm/aad_creation.png deleted file mode 100644 index 66ee92dade..0000000000 Binary files a/docs/source/deployment/deploy_shm/aad_creation.png and /dev/null differ diff --git a/docs/source/deployment/deploy_shm/aad_global_admin.png b/docs/source/deployment/deploy_shm/aad_global_admin.png deleted file mode 100644 index 56070e18cc..0000000000 Binary files a/docs/source/deployment/deploy_shm/aad_global_admin.png and /dev/null differ diff --git a/docs/source/deployment/deploy_shm/aad_mfa_settings.png b/docs/source/deployment/deploy_shm/aad_mfa_settings.png deleted file mode 100644 index 
98b7bc63f6..0000000000 Binary files a/docs/source/deployment/deploy_shm/aad_mfa_settings.png and /dev/null differ diff --git a/docs/source/deployment/deploy_shm/aad_sspr.png b/docs/source/deployment/deploy_shm/aad_sspr.png deleted file mode 100644 index 8e11c71693..0000000000 Binary files a/docs/source/deployment/deploy_shm/aad_sspr.png and /dev/null differ diff --git a/docs/source/deployment/deploy_shm/aad_tenant_id.png b/docs/source/deployment/deploy_shm/aad_tenant_id.png deleted file mode 100644 index b032d2ed6e..0000000000 Binary files a/docs/source/deployment/deploy_shm/aad_tenant_id.png and /dev/null differ diff --git a/docs/source/deployment/deploy_shm/catalina_authentication.png b/docs/source/deployment/deploy_shm/catalina_authentication.png deleted file mode 100644 index 3f896dbae6..0000000000 Binary files a/docs/source/deployment/deploy_shm/catalina_authentication.png and /dev/null differ diff --git a/docs/source/deployment/deploy_shm/certificate_details.png b/docs/source/deployment/deploy_shm/certificate_details.png deleted file mode 100644 index ef90510bbe..0000000000 Binary files a/docs/source/deployment/deploy_shm/certificate_details.png and /dev/null differ diff --git a/docs/source/deployment/deploy_shm/dc_resource_groups.png b/docs/source/deployment/deploy_shm/dc_resource_groups.png deleted file mode 100644 index cc2ebe49e4..0000000000 Binary files a/docs/source/deployment/deploy_shm/dc_resource_groups.png and /dev/null differ diff --git a/docs/source/deployment/deploy_shm/enable_password_writeback.png b/docs/source/deployment/deploy_shm/enable_password_writeback.png deleted file mode 100644 index 42d7e28a28..0000000000 Binary files a/docs/source/deployment/deploy_shm/enable_password_writeback.png and /dev/null differ diff --git a/docs/source/deployment/deploy_shm/shm_subdomain_ns.png b/docs/source/deployment/deploy_shm/shm_subdomain_ns.png deleted file mode 100644 index 9463776d96..0000000000 Binary files a/docs/source/deployment/deploy_shm/shm_subdomain_ns.png and /dev/null differ diff --git a/docs/source/deployment/deploy_shm/vnet_resource_groups.png b/docs/source/deployment/deploy_shm/vnet_resource_groups.png deleted file mode 100644 index 766ed8c96f..0000000000 Binary files a/docs/source/deployment/deploy_shm/vnet_resource_groups.png and /dev/null differ diff --git a/docs/source/deployment/deploy_sre.md b/docs/source/deployment/deploy_sre.md index 9be11aae15..5ebeaa61a0 100644 --- a/docs/source/deployment/deploy_sre.md +++ b/docs/source/deployment/deploy_sre.md @@ -1,295 +1,78 @@ (deploy_sre)= -# Deploy a Secure Research Environment with Apache Guacamole +# Deploy a Secure Research Environment -These instructions will walk you through deploying a Secure Research Environment (SRE) that uses an existing Safe Haven Management (SHM) environment. +These instructions will deploy a new Secure Research Environment (SRE). -```{include} snippets/00_symbols.partial.md -:relative-images: -``` +::::{admonition} Ensure you are using a hatch shell +:class: dropdown important -## 1. {{seedling}} Prerequisites +You must use a `hatch` shell to run any `dsh` commands. +From the project base directory run: -```{include} snippets/01_prerequisites.partial.md -:relative-images: -``` +:::{code} shell +$ hatch shell +::: -(roles_deployer_sre_id)= +This ensures that you are using the intended version of Data Safe Haven with the correct set of dependencies. +:::: -## 2. 
{{clipboard}} Secure Research Environment configuration +## Configuration -```{include} snippets/02_configuration.partial.md -:relative-images: -``` +Each project will have its own dedicated SRE. -## 3. {{computer}} Deploy SRE +- Create a configuration file -![Powershell: a few hours](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=a%20few%20hours) at {{file_folder}} `./deployment/secure_research_environment/setup` +:::{code} shell +$ dsh config template --file PATH_YOU_WANT_TO_SAVE_YOUR_YAML_FILE_TO +::: -```powershell -PS> ./Deploy_SRE.ps1 -shmId -sreId -VMs -``` +- Edit this file in your favourite text editor, replacing the placeholder text with appropriate values for your setup. -- where `` is the {ref}`management environment ID ` for this SHM -- where `` is the {ref}`secure research environment ID ` for this SRE -- where `` is a list of [Azure VM sizes](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes) that you want to create. For example `'Standard_D2s_v3', 'default', 'Standard_NC6s_v3'`. If you are unsure of the appropriate VM sizes, run the script with a single `'default'`. The default VM size is `Standard_D2s_v3`. -- VMs can be resized after deployment. See how to do so in the {ref}`System Manager instructions `. +::::{admonition} Example YAML configuration file +:class: dropdown tip -You will be prompted for credentials for: +:::{code} yaml +azure: + subscription_id: # ID of the Azure subscription that the TRE will be deployed to + tenant_id: # Home tenant for the Azure account used to deploy infrastructure: `az account show` +description: # A free-text description of your SRE deployment +dockerhub: + access_token: # The password or personal access token for your Docker Hub account. We strongly recommend using a Personal Access Token with permissions set to Public Repo Read-only + username: # Your Docker Hub account name +name: # A name for your SRE deployment containing only letters, numbers, hyphens and underscores +sre: + admin_email_address: # Email address shared by all administrators + admin_ip_addresses: # List of IP addresses belonging to administrators + data_provider_ip_addresses: # List of IP addresses belonging to data providers + databases: # List of database systems to deploy + remote_desktop: + allow_copy: # True/False: whether to allow copying text out of the environment + allow_paste: # True/False: whether to allow pasting text into the environment + research_user_ip_addresses: # List of IP addresses belonging to users + software_packages: # any/pre-approved/none: which packages from external repositories to allow + timezone: # Timezone in pytz format (eg. Europe/London) + workspace_skus: # List of Azure VM SKUs - see cloudprice.net for list of valid SKUs +::: -- a user with admin rights over the Azure subscriptions you plan to deploy into -- a user with Global Administrator privileges over the SHM Microsoft Entra ID +:::: -This will perform the following actions, which can be run individually if desired: +## Upload the configuration file -
-Remove data from previous deployments +- Upload the config to Azure. This will validate your file and report any problems. -```{include} snippets/03_01_remove_data.partial.md -:relative-images: -``` +:::{code} shell +$ dsh config upload PATH_TO_YOUR_EDITED_YAML_FILE +::: -
+:::{hint} +If you want to make changes to the config, edit this file and then run `dsh config upload` again. +::: -
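+Before uploading, it can help to sanity-check your edited file against a complete example. The following sketch is purely illustrative: every value (subscription and tenant IDs, IP addresses, the `sandbox` name, the SKU list) is a made-up placeholder, and the exact set of fields may differ between releases, so treat the commented template generated by `dsh config template` above as the source of truth.
+
+:::{code} yaml
+azure:
+  subscription_id: aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee  # placeholder subscription ID
+  tenant_id: 11111111-2222-3333-4444-555555555555  # placeholder tenant ID
+description: Sandbox SRE for the Baker Street project
+dockerhub:
+  access_token: REPLACE_WITH_YOUR_DOCKER_HUB_PAT
+  username: sherlockholmes
+name: sandbox
+sre:
+  admin_email_address: dsh-admins@bakerst.london
+  admin_ip_addresses:
+    - 203.0.113.10
+  data_provider_ip_addresses:
+    - 203.0.113.20
+  databases: []  # no databases deployed in this illustrative example
+  remote_desktop:
+    allow_copy: false
+    allow_paste: false
+  research_user_ip_addresses:
+    - 203.0.113.0/24
+  software_packages: none
+  timezone: Europe/London
+  workspace_skus:
+    - Standard_D2s_v3
+:::
+
+If `dsh config upload` reports validation errors, compare your file against the commented template rather than against this example.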
-Register SRE with the SHM +## Deployment -```{include} snippets/03_02_register_sre.partial.md -:relative-images: -``` +- Deploy each SRE individually [approx 30 minutes]: -
- -
-Create SRE DNS Zone - -```{include} snippets/04_01_sre_dns.partial.md -:relative-images: -``` - -
- -```{include} snippets/04_02_manual_dns.partial.md -:relative-images: -``` - -
-Deploy the virtual network - -```{include} snippets/04_03_deploy_vnet.partial.md -:relative-images: -``` - -
- -
-Deploy storage accounts - -```{include} snippets/05_storage_accounts.partial.md -:relative-images: -``` - -
- -
-Deploy Apache Guacamole remote desktop - -![Powershell: ten minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=ten%20minutes) at {{file_folder}} `./deployment/secure_research_environment/setup` - -```powershell -PS> ./Setup_SRE_Guacamole_Servers.ps1 -shmId -sreId -``` - -- where `` is the {ref}`management environment ID ` for this SHM. -- where `` is the {ref}`secure research environment ID ` for this SRE. - -
- -
-Update SSL certificate -![Powershell: five minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=five%20minutes) at {{file_folder}} `./deployment/secure_research_environment/setup` -```powershell -PS> ./Update_SRE_SSL_Certificate.ps1 -shmId -sreId -``` -- where `` is the {ref}`management environment ID ` for this SHM -- where `` is the {ref}`secure research environment ID ` for this SRE -- where `` is an email address that you want to be notified when certificates are close to expiry -```{tip} -`./Update_SRE_RDS_SSL_Certificate.ps1` should be run again whenever you want to update the certificate for this SRE. -``` -```{caution} -`Let's Encrypt` will only issue **5 certificates per week** for a particular host (e.g. `rdg-sre-sandbox.project.turingsafehaven.ac.uk`). -To reduce the number of calls to `Let's Encrypt`, the signed certificates are stored in the Key Vault for easy redeployment. -For production environments this should usually not be an issue. -``` -````{important} -If you find yourself frequently redeploying a test environment and hit the `Let's Encrypt` certificate limit, you can use: -```powershell -> ./Update_SRE_RDS_SSL_Certificate.ps1 -dryRun $true -``` -to use the `Let's Encrypt` staging server, which will issue certificates more frequently. -These certificates will **not** be trusted by your browser, and so should not be used in production. -````
- -
-Deploy web applications (CodiMD and GitLab) - -```{include} snippets/07_deploy_webapps.partial.md -:relative-images: -``` - -
- -
-Deploy databases - -```{include} snippets/08_databases.partial.md -:relative-images: -``` - -
- -
-Deploy Secure Research Desktops (SRDs) - -The `-VmSizes` parameter that you provided to the `Deploy_SRE.ps1` script determines how many SRDs are created and how large each one will be. - -```{note} -The following script will be run once for each `` that you specified. -If you specify the same size more than once, you will create multiple SRDs of that size. -``` - -```{include} snippets/09_single_srd.partial.md -:relative-images: -``` - -
- -
-Apply network configuration - -```{include} snippets/10_network_lockdown.partial.md -:relative-images: -``` - -
- -
-Configure firewall - -```{include} snippets/11_configure_firewall.partial.md -:relative-images: -``` - -
- -
-Configure monitoring - -```{include} snippets/12_configure_monitoring.partial.md -:relative-images: -``` - -
- -
-Enable backup - -```{include} snippets/13_enable_backup.partial.md -:relative-images: -``` - -
- -## 4. {{microscope}} Test deployed SRE - -(deploy_sre_apache_guacamole_create_user_account)= - -### {{bicyclist}} Verify non-privileged user account is set up - -```{include} snippets/06_01_create_user_account.partial.md -:relative-images: -``` - -To complete the account setup, follow the instructions for password and MFA setup present in the {ref}`user guide `. - -(deploy_sre_apache_guacamole_test_remote_desktop)= - -### {{pear}} Test the Apache Guacamole remote desktop - -- Launch a local web browser on your **deployment machine** and go to `https://.` and log in with the user name and password you set up for the non-privileged user account. - - For example for ` = project.turingsafehaven.ac.uk` and ` = sandbox` this would be `https://sandbox.project.turingsafehaven.ac.uk/` -- You should see a screen like the following. If you do not, follow the **troubleshooting** instructions below. - - ```{image} ../roles/researcher/user_guide/guacamole_dashboard.png - :alt: Guacamole dashboard - :align: center - ``` - -- At this point you should double click on the {{computer}} `Ubuntu0` link under `All Connections` which should bring you to the secure remote desktop (SRD) login screen -- You will need the short-form of the user name (ie. without the `@` part) and the same password as before -- This should bring you to the SRD that will look like the following - - ```{image} deploy_sre/guacamole_desktop.png - :alt: Guacamole dashboard - :align: center - ``` - -```{important} -Ensure that you are connecting from one of the **permitted IP ranges** specified in the `inboundAccessFrom` section of the SRE config file. -For example, if you have authorised a corporate VPN, check that you have correctly configured you client to connect to it. -``` - -````{error} -If you see an error like the following when attempting to log in, it is likely that the Microsoft Entra application is not registered as an `ID token` provider. - -```{image} deploy_sre/guacamole_aad_idtoken_failure.png -:alt: AAD ID token failure -:align: center -``` - -
Register Microsoft Entra application -![Microsoft Entra ID: one minute](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-academic&label=Microsoft%20Entra%20ID&color=blue&message=one%20minute) -- From the Azure portal, navigate to the Microsoft Entra ID you have created. -- Navigate to `Microsoft Entra ID > App registrations`, and select the application called `Guacamole SRE `. -- Click on `Authentication` on the left-hand sidebar -- Ensure that the `ID tokens` checkbox is ticked and click on the `Save` icon if you had to make any changes - ```{image} deploy_sre/guacamole_aad_app_registration_idtoken.png - :alt: Microsoft Entra app registration - :align: center - ```
-```` - -### {{snowflake}} Test CodiMD and GitLab servers - -- Connect to the remote desktop {ref}`using the instructions above ` -- Test `CodiMD` by clicking on the `CodiMD` desktop icon. - - This should open a web browser inside the remote desktop - - Log in with the short-form `username` of a user in the `SG Research Users` security group. -- Test `GitLab` by clicking on the `GitLab` desktop icon. - - This should open a web browser inside the remote desktop - - Log in with the short-form `username` of a user in the `SG Research Users` security group. - -````{error} -Should there be any issues using the web apps (e.g. unable to log in, or log in page not appearing) you can inspect the build log and access the console for the relevant VMs following the guide for {ref}`System Managers ` -```` - -### {{fire}} Run smoke tests on SRD - -```{include} snippets/14_run_smoke_tests.partial.md -:relative-images: -``` +:::{code} shell +$ dsh sre deploy YOUR_SRE_NAME +::: diff --git a/docs/source/deployment/deploy_sre/guacamole_aad_app_registration_idtoken.png b/docs/source/deployment/deploy_sre/guacamole_aad_app_registration_idtoken.png deleted file mode 100644 index e6da9e1ac3..0000000000 Binary files a/docs/source/deployment/deploy_sre/guacamole_aad_app_registration_idtoken.png and /dev/null differ diff --git a/docs/source/deployment/deploy_sre/guacamole_aad_idtoken_failure.png b/docs/source/deployment/deploy_sre/guacamole_aad_idtoken_failure.png deleted file mode 100644 index 6b446141bf..0000000000 Binary files a/docs/source/deployment/deploy_sre/guacamole_aad_idtoken_failure.png and /dev/null differ diff --git a/docs/source/deployment/deploy_sre/guacamole_desktop.png b/docs/source/deployment/deploy_sre/guacamole_desktop.png deleted file mode 100644 index d329da784d..0000000000 Binary files a/docs/source/deployment/deploy_sre/guacamole_desktop.png and /dev/null differ diff --git a/docs/source/deployment/deploy_sre/sre_subdomain_ns.png b/docs/source/deployment/deploy_sre/sre_subdomain_ns.png deleted file mode 100644 index e76e808d4e..0000000000 Binary files a/docs/source/deployment/deploy_sre/sre_subdomain_ns.png and /dev/null differ diff --git a/docs/source/deployment/index.md b/docs/source/deployment/index.md index 78c8df303e..db75291385 100644 --- a/docs/source/deployment/index.md +++ b/docs/source/deployment/index.md @@ -1,43 +1,51 @@ # Deployment -```{toctree} +:::{toctree} :hidden: +setup_context.md +configure_entra_id.md deploy_shm.md -build_srd_image.md deploy_sre.md -security_checklist.md -``` +::: Deploying an instance of the Data Safe Haven involves the following steps: +- Configuring the context used to host the Pulumi backend infrastructure +- Configuring the Microsoft Entra directory where you will manage users - Deploying the Safe Haven management component -- Building a secure research desktop virtual machine image to be used by all projects - Deploying a Secure Research Environment for each project -Deployment might be carried out by members of an institutional IT team or external contractors. -In either case, the deploying team should ensure that the system is working as expected before handing it over to the {ref}`System Managers `. -We suggest developing a security checklist for deployers to work through - an example of one used at the Alan Turing Institute is shown below. +## Requirements -For instructions on removing deployed resources, refer to the guide for {ref}`System Managers `. 
+Install the following requirements before starting -[Safe Haven Management (SHM) deployment guide](deploy_shm.md) -: deploy a single Safe Haven Management (SHM) segment. This will deploy infrastructure shared between projects such as user management and package mirrors/proxies. +- [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) +- [Hatch](https://hatch.pypa.io/1.9/install/) +- [Pulumi](https://www.pulumi.com/docs/get-started/install/) -[Secure Research Desktop (SRD) build instructions](build_srd_image.md) -: build and publish our "batteries included" Secure Research Desktop (SRD) virtual machine image. Instructions about how to customise this are also available here. +### Docker Hub -[Secure Research Environment (SRE) deployment guide](deploy_sre.md) -: deploy one Secure Research Environment (SRE) for each project you want to have its own independent, isolated analysis environment. +The Data Safe Haven uses several public Docker images. +As Docker Hub now imposes [rate limits](https://docs.docker.com/docker-hub/download-rate-limit/) on anonymous downloads, you will need to use a Docker Hub account to deploy the Data Safe Haven. +You can create one following [the instructions here](https://hub.docker.com/) if you do not already have one. -[Security checklist](security_checklist.md) -: an example security checklist used at the Alan Turing Institute to help evaluate the security of our deployments. +:::{important} +We recommend using a personal access token (PAT) with **Public Repo Read-Only** permissions rather than your Docker account password. +See [the instructions here](https://docs.docker.com/security/for-developers/access-tokens/) for details of how to create a PAT. +::: -````{warning} -Microsoft have renamed Azure Active Directory to [Microsoft Entra ID](https://learn.microsoft.com/en-us/entra/fundamentals/new-name). -We have updated these guides in the light of this change. -However, as of February 2024, Microsoft have not completed the renaming process. -Some software and documentation retains the old Azure Active Directory name. -Our documentation reflects the name that is currently in use, rather than the name that will be used once the renaming process is complete. -Where we use the name "Azure Active Directory", if the corresponding software, menu option, or documentation cannot be found, look instead for a version using the Microsoft Entra ID name. -```` +## Install the project + +Download or checkout this code from GitHub. + +:::{important} +**{sub-ref}`today`**: you should use the `develop` branch as no stable v5 release has been tagged. +Please contact the development team in case of any problems. +::: + +Enter the base directory and install Python dependencies with `hatch` by doing the following: + +:::{code} shell +$ hatch run true +::: diff --git a/docs/source/deployment/security_checklist.md b/docs/source/deployment/security_checklist.md deleted file mode 100644 index 173f63568f..0000000000 --- a/docs/source/deployment/security_checklist.md +++ /dev/null @@ -1,773 +0,0 @@ -(deployment_security_checklist)= - -# Security evaluation checklist - -```{caution} -This security checklist is used by the Alan Turing Institute to evaluate compliance with our default controls. -Organisations are responsible for making their own decisions about the suitability of any of our default controls and should treat this checklist as an example, not a template to follow. 
-``` - -In this check list we aim to **evaluate** our deployment against the {ref}`security configuration ` that we apply at the Alan Turing Institute. -The security checklist currently focuses on checks that can evaluate these security requirements for {ref}`policy_tier_2` (or greater) SREs (with some steps noted as specific to a tier): - -## How to use this checklist - -- Ensure you have an SHM and attached SRE(s) that you wish to test. - -```{note} -Some parts of the checklist are only relevant when there are multiple SREs attached to the same SHM. -``` - -- Work your way through the actions described in each section, taking care to notice each time you see a {{camera}} or a {{white_check_mark}} and the word **Verify**: - -```{note} -- {{camera}} Where you see the camera icon, there should be accompanying screenshot(s) of evidence for this item in the checklist (you may wish to save your own equivalent screenshots as evidence) -- {{white_check_mark}} This indicates a checklist item for which a screenshot is either not appropriate or difficult -``` - -## Prerequisites - -- **Deployed SHM** that you are testing -- **Deployed SRE A** that is attached to the SHM -- **Deployed SRE B** that is attached to the same SHM -- **VPN access** to the SHM that you are testing - -```{important} -- If you haven't already, you'll need download a VPN certificate and configure {ref}`VPN access ` for the SHM -- Make sure you can use Remote Desktop to log in to the {ref}`domain controller (DC1) `. -``` - -The following users will be needed for this checklist - -- **SRE standard user** who is a member of the **SRE A** research users group - - Create a new user **without** MFA - - Following the SRE deployment instructions for setting up a {ref}`non privileged user account `, create an account but **do not** add them to any `SG Research Users` group. - - Visit [`https://aka.ms/sspr`](https://aka.ms/sspr) in an incognito browser - - Attempt to login and reset password, but **do not complete MFA** (see {ref}`these steps `) -- {ref}`role_system_manager` who has `Contributor` permissions (or higher) on the underlying Azure subscription -- **Data provider** who has no accounts on the Safe Haven system - -## 1. Multifactor authentication and password strength - -### Turing configuration setting: - -- Users must set up MFA before accessing the secure analysis environment. -- Users cannot access the environment without MFA. -- Users are required/advised to create passwords of a certain strength. - -### Implication: - -- Users are required to authenticate with Multi-factor Authentication (MFA) in order to access the secure analysis environment. -- Passwords are strong - -### Verify by: - -#### Check: Non-group user cannot access the apps - -Attempt to login to the remote desktop web client as the **SRE standard user** - -````{attention} -{{camera}} Verify that: -
user is prompted to set up MFA -```{image} security_checklist/login_no_mfa_guacamole.png -:alt: Guacamole MFA setup prompt -:align: center -``` -
-```` - -#### Check: Membership of the correct group is insufficient to give access - -Add the **SRE standard user** to the relevant `Research Users` group under `Safe Haven Security Groups` on the domain controller. - -````{attention} -{{camera}} Verify that: -
user is prompted to set up MFA -```{image} security_checklist/login_no_mfa_guacamole.png -:alt: Guacamole MFA setup prompt -:align: center -``` -
-```` - -#### User can self-register for MFA - -Check that the **SRE standard user** is able to successfully set up MFA - -- Visit [`https://aka.ms/mfasetup`](https://aka.ms/mfasetup) in an incognito browser -- Login as the user you set up - -```{attention} -{{white_check_mark}} **Verify that:** user is guided to set up MFA -``` - -- Set up MFA as per {ref}`the user guide instructions `. - -````{attention} -{{camera}} Verify that: - -
MFA setup is successful - -```{image} security_checklist/aad_additional_security_verification.png -:alt: AAD additional security verification -:align: center -``` -
-```` - -#### User can login after setting up MFA - -Check that the **SRE standard user** can authenticate with MFA. - -- Login to the remote desktop web client as the **SRE standard user**. - -````{attention} -{{camera}} Verify that: -
you are prompted for MFA and can respond - -```{image} security_checklist/aad_mfa_approve_signin_request.png -:alt: AAD MFA approve sign-in request -:align: center -``` - -
-```` - -#### Authenticated user can access the Secure Research Desktop (SRD) desktop - -Check that the **SRE standard user** can access the Secure Research Desktop (SRD) desktop. - -- Login to the remote desktop web client as the **SRE standard user**. - -````{attention} -{{camera}} Verify that: -
you can connect to Desktop: Ubuntu0 - -```{image} security_checklist/guacamole_srd_desktop.png -:alt: SRD desktop -:align: center -``` - -
-```` - -## 2. Isolated Network - -### Turing configuration setting: - -- {ref}`Researchers ` cannot access any part of the network from outside the network. -- VMs in the SHM are only accessible by {ref}`System Managers ` using the management VPN. -- Whilst in the network, one cannot use the internet to connect outside the network. -- SREs in the same SHM are isolated from one another. - -### Implication: - -- The Data Safe Haven network is isolated from external connections (both {ref}`policy_tier_2` and {ref}`policy_tier_3`) - -### Verify by: - -#### Connect to SHM VMs if and only if connected to the SHM VPN: - -- Connect to the SHM VPN -- Attempt to connect to the SHM DC - -```{attention} -{{white_check_mark}} **Verify that:** connection works -``` - -- Disconnect from the SHM VPN -- Attempt to connect to the SHM DC - -```{attention} -{{white_check_mark}} **Verify that:** connection fails -``` - -#### Fail to connect to the internet from within an SRD on the SRE network - -- Login as a user to an SRD from within the SRE by using the web client. -- Choose your favourite three websites and attempt to access the internet using a browser - -````{attention} -{{camera}} Verify that: - -
browsing to the website fails - -```{image} security_checklist/srd_no_internet.png -:alt: SRD no internet -:align: center -``` - -
- -
you cannot access the website using curl - -```{image} security_checklist/srd_no_curl.png -:alt: SRD no curl -:align: center -``` - -
- -
you cannot look up the IP address for the website using nslookup -```{image} security_checklist/srd_no_nslookup.png -:alt: SRD no nslookup -:align: center -```
-```` - -#### SREs are isolated from one another - -Check that users cannot connect from one SRE to another one in the same SHM, even if they have access to both SREs - -- Ensure that the **SRE standard user** is a member of the research users group for both **SRE A** and **SRE B** -- Log in to an SRD in **SRE A** as the **SRE standard user** using the web client. -- Open the `Terminal` app from the dock at the bottom of the screen and enter `ssh -v -o ConnectTimeout=10 ` where the IP address is one for an SRD in SRE B (you can find this in the Azure portal) - -````{attention} -{{camera}} Verify that: - -
SSH connection fails - -```{image} security_checklist/ssh_connection_fail.png -:alt: SSH connection failure -:align: center -``` - -
-```` - -- Check that users cannot copy files from one SRE to another one in the same SHM - - Log in to an SRD in **SRE A** as the **SRE standard user** using the web client. - - In a separate browser window, do the same for **SRE B**. - - Attempt to copy and paste a file from one SRE desktop to another - -```{attention} -{{white_check_mark}} **Verify that:** copy-and-paste is not possible -``` - -- Check that the network rules are set appropriately to block outgoing traffic -- Visit the portal and find `NSG_SHM__SRE__COMPUTE`, then click on `Settings > Outbound security rules` - -````{attention} -{{camera}} Verify that: - -
there exists an NSG rule with Destination Internet and Action Deny and that no higher priority rule allows connection to the internet. - -```{image} security_checklist/nsg_outbound_access.png -:alt: NSG outbound access -:align: center -``` -
-```` - -## 3. User devices - -### Turing configuration setting: - -- Managed devices must be provided by an approved organisation and the user must not have administrator access to them. -- Network rules for higher tier environments permit access only from IP ranges corresponding to `Restricted` networks that only permit managed devices to connect. - -### Implication: - -- At {ref}`policy_tier_3`, only managed devices can connect to the Data Safe Haven environment. -- At {ref}`policy_tier_2`, any device can connect to the Data Safe Haven environment (with VPN connection and correct credentials). - -### Verify by: - -#### User devices ({ref}`policy_tier_2`) - -- Connect to the environment using an allow-listed IP address and credentials - -```{attention} -{{white_check_mark}} **Verify that:** connection succeeds -``` - -- Connect to the environment from an IP address that is not allow-listed but with correct credentials. - -```{attention} -{{white_check_mark}} **Verify that:** connection fails -``` - -#### User devices ({ref}`policy_tier_3`) - -All managed devices should be provided by a known IT team at an approved organisation. - -```{attention} -{{white_check_mark}} **Verify that:** the IT team of the approved organisation take responsibility for managing the device. -``` - -```{attention} -{{white_check_mark}} **Verify that:** the user does not have administrator permissions on the device. -``` - -```{attention} -{{white_check_mark}} **Verify that:** allow-listed IP addresses are exclusive to managed devices. -``` - -- Connect to the environment using an allow-listed IP address and credentials - -```{attention} -{{white_check_mark}} **Verify that:** connection succeeds -``` - -- Connect to the environment from an IP address that is not allow-listed but with correct credentials. - -```{attention} -{{white_check_mark}} **Verify that:** connection fails -``` - -#### Network rules ({ref}`policy_tier_2` and above): - -There are network rules permitting access to the remote desktop gateway from allow-listed IP addresses only - -- Navigate to the NSG for this SRE in the portal: - - {{pear}} `NSG_SHM__SRE__GUACAMOLE` - -````{attention} -{{camera}} Verify that: - -
the NSG has network rules allowing inbound access from allow-listed IP addresses only - -```{image} security_checklist/nsg_inbound_access.png -:alt: NSG inbound access -:align: center -``` -
-```` - -```{attention} -{{white_check_mark}} **Verify that:** all other NSGs (apart from `NSG_SHM*_SRE_\_DEPLOYMENT`) have an inbound `Deny All` rule and no higher priority rule allowing inbound connections from outside the Virtual Network (apart from the Admin VPN in some cases). -``` - -## 4. Physical security - -### Turing configuration setting: - -- Medium security research spaces control the possibility of unauthorised viewing. -- Card access or other means of restricting entry to only known researchers (such as the signing in of guests on a known list) is required. -- Screen adaptations or desk partitions can be adopted in open-plan spaces if there is a high risk of "visual eavesdropping". -- Firewall rules for the Environments can permit access only from Restricted network IP ranges corresponding to these research spaces. - -### Implication: - -- At {ref}`policy_tier_3` access is limited to certain secure physical spaces - -### Verify by: - -#### Physical security ({ref}`policy_tier_3`) - -Connection from outside the secure physical space is not possible. - -- Attempt to connect to the {ref}`policy_tier_3` SRE web client from home using a managed device and the correct VPN connection and credentials - -```{attention} -{{white_check_mark}} **Verify that:** connection fails -``` - -Connection from within the secure physical space is possible. - -- Attempt to connect from research office using a managed device and the correct VPN connection and credentials - -```{attention} -{{white_check_mark}} **Verify that:** connection succeeds -``` - -```{attention} -{{white_check_mark}} **Verify that:** check the network IP ranges corresponding to the research spaces and compare against the IPs accepted by the firewall. -``` - -```{attention} -{{white_check_mark}} **Verify that:** confirm in person that physical measures such as screen adaptions or desk partitions are present if risk of visual eavesdropping is high. -``` - -## 5. Remote connections - -### Turing configuration setting: - -- User can connect via remote desktop but cannot connect through other means such as `SSH` - -### Implication: - -- Connections can only be made via remote desktop ({ref}`policy_tier_2` and above) - -### Verify by: - -#### SSH connection is not possible - -- Attempt to login as the **SRE standard user** via `SSH` with `ssh @.` (e.g. `ssh -v -o ConnectTimeout=10 ada.lovelace@sandbox.turingsafehaven.ac.uk`) - -````{attention} -{{camera}} Verify that: - -
SSH login by fully-qualified domain name fails - -```{image} security_checklist/srd_no_ssh_by_fqdn.png -:alt: SRD SSH connection by FQDN not possible -:align: center -``` -
-```` - -- Find the public IP address for the remote desktop server VM by searching for this VM in the portal, then looking at `Connect` under `Settings`. - - {{pear}} VM name will be `GUACAMOLE-SRE-` -- Attempt to login as the **SRE standard user** via `SSH` with `ssh @` (e.g. `ssh ada.lovelace@8.8.8.8`) - -````{attention} -{{camera}} Verify that: - -
SSH login by public IP address fails - -```{image} security_checklist/srd_no_ssh_by_ip.png -:alt: SRD SSH connection by IP address not possible -:align: center -``` -
-```` - -```{attention} -{{white_check_mark}} **Verify that:** the remote desktop server (`RDG-SRE-`) is the only SRE resource with a public IP address -``` - -## 6. Copy-and-paste - -### Turing configuration setting: - -- Users cannot copy something from outside the network and paste it into the network. -- Users cannot copy something from within the network and paste it outside the network. - -### Implication: - -- Copy and paste is disabled on the remote desktop - -### Verify by: - -#### Users are unable to copy-and-paste between the SRD and their local device - -- Copy some text from your deployment device -- Login to an SRD as the **SRE standard user** via the remote desktop web client -- Open up a notepad or terminal on the SRD and attempt to paste the text to it. - -```{attention} -{{white_check_mark}} **Verify that:** paste fails -``` - -- Write some next in the note pad or terminal of the SRD and copy it -- Attempt to copy the text externally to deployment device (e.g. into URL of browser) - -```{attention} -{{white_check_mark}} **Verify that:** paste fails -``` - -#### Users can copy between VMs inside the network - -- Login to an SRD as the **SRE standard user** via the remote desktop web client -- Open up a notepad or terminal on the SRD and attempt to paste the text to it. -- In another tab or browser connect to a different SRD (or to the same VM via the SSH connection) using the remote desktop web client -- Attempt to paste the text to it. - -```{attention} -{{white_check_mark}} **Verify that:** paste succeeds -``` - -## 7. Data ingress - -### Turing configuration setting: - -- Prior to access to the ingress volume being provided, the {ref}`role_data_provider_representative` must provide the IP address(es) from which data will be uploaded and a secure mechanism by which a time-limited upload token can be sent, such as an encrypted email system. -- Once these details have been received, the data ingress volume should be opened for data upload. - -To minimise the risk of unauthorised access to the dataset while the ingress volume is open for uploads, the following security measures are in place: - -- Access to the ingress volume is restricted to a limited range of IP addresses associated with the **Dataset Provider** and the **host organisation**. -- The {ref}`role_data_provider_representative` receives a write-only upload token. - - This allows them to upload, verify and modify the uploaded data, but does not viewing or download of the data. - - This provides protection against an unauthorised party accessing the data, even they gain access to the upload token. -- The upload token expires after a time-limited upload window. -- The upload token is transferred to the Dataset Provider via the provided secure mechanism. - -### Implication: - -- All data transfer to the Data Safe Haven should be via our secure data transfer process, which gives the {ref}`role_data_provider_representative` time-limited, write-only access to a dedicated data ingress volume from a specific location. -- Data is stored securely until approved for user access. 
- -### Verify by: - -To test all the above, you will need to act both as the {ref}`role_system_manager` and {ref}`role_data_provider_representative`: - -#### Check that the {ref}`role_system_manager` can send an upload token to the {ref}`role_data_provider_representative` over a secure channel - -- Use the IP address of your own device in place of that of the data provider -- Generate an upload token with write-only permissions following the instructions in the {ref}`administrator document `. - -```{attention} -{{white_check_mark}} **Verify that:** the upload token is successfully created. -``` - -```{attention} -{{white_check_mark}} **Verify that:** you are able to send this token using a secure mechanism. -``` - -#### Ensure that data ingress works only for connections from the accepted IP address range - -- As the {ref}`role_data_provider_representative`, ensure you're working from a device that has an allow-listed IP address -- Using the upload token with write-only permissions and limited time period that you set up in the previous step, follow the ingress instructions for the {ref}`data provider ` - -```{attention} -{{white_check_mark}} **Verify that:** writing succeeds by uploading a file -``` - -```{attention} -{{white_check_mark}} **Verify that:** attempting to open or download any of the files results in the following error: `Failed to start transfer: Insufficient credentials.` under the `Activities` pane at the bottom of the MS Azure Storage Explorer window. -``` - -- Switch to a device that lacks an allow-listed IP address (or change your IP with a VPN) -- Attempt to write to the ingress volume via the test device - -```{attention} -{{white_check_mark}} **Verify that:** the access token fails. -``` - -#### Check that the upload fails if the token has expired - -- Create a write-only token with short duration - -```{attention} -{{white_check_mark}} **Verify that:** you can connect and write with the token during the duration -``` - -```{attention} -{{white_check_mark}} **Verify that:** you cannot connect and write with the token after the duration has expired -``` - -```{attention} -{{white_check_mark}} **Verify that:** the overall ingress works by uploading different kinds of files, e.g. data, images, scripts (if appropriate). -``` - -## 8. Data egress - -### Turing configuration setting: - -- Users can write to the `/output` volume -- A {ref}`role_system_manager` can view and download data in the `/output` volume via `Azure Storage Explorer`. - -### Implication: - -- SREs contain an `/output` volume, in which SRE users can store data designated for egress. - -### Verify by: - -#### Confirm that a non-privileged user is able to read the different storage volumes and write to output - -- Login to an SRD as the **SRE standard user** via the remote desktop web client -- Open up a file explorer and search for the various storage volumes - -```{attention} -{{white_check_mark}} **Verify that:** the `/output` volume exists and can be read and written to -``` - -```{attention} -{{white_check_mark}} **Verify that:** the permissions of other storage volumes match that {ref}`described in the user guide `. -``` - -#### Confirm that the different volumes exist in blob storage and that logging on requires domain admin permissions - -- As the {ref}`role_system_manager`, follow the instructions in the {ref}`administrator document ` on how to access files set for egress with `Azure Storage Explorer`. 
- -```{attention} -{{white_check_mark}} **Verify that:** you can see the files written to the `/output` storage volume (including any you created as a non-privileged user in step 1) -``` - -```{attention} -{{white_check_mark}} **Verify that:** a written file can be taken out of the environment via download -``` - -## 9. Software ingress - -### Turing configuration setting: - -- For {ref}`policy_tier_0` and {ref}`policy_tier_1` environments, outbound internet access means users can directly download their software from the internet. -- For {ref}`policy_tier_2` or higher environments we use the secure data transfer process. - -- Installation during deployment - - If known in advance, software can be installed during SRD deployment whilst there is still internet access, but before project data is added. Once the software is installed, the SRD undergoes ingress into the environment with a one way lock. -- Installation after deployment - - Once an SRD has been deployed into the analysis environment it cannot be moved out. There is no outbound internet access. - - Software is added via ingress in a similar manner to data: - - Researchers are provided temporary write-only access to the software ingress volume. - - The access is then revoked and the software is then reviewed. - - If it passes review, the software is moved into the environment. - - If the software requires administrator rights to install, a {ref}`role_system_manager` must do this. Otherwise, the researcher can do this themselves. - -### Implication: - -- The base SRD provided in the SREs comes with a wide range of common data science software pre-installed, as well as package mirrors. -- Additional software must be added separately via ingress. - -### Verify by: - -#### Check that some software tools were installed as expected during deployment - -- Login to an SRD as the **SRE standard user** via the remote desktop web client - -````{attention} -{{camera}} Verify that: - -
the following programmes can be opened without issue: DBeaver, RStudio, PyCharm and Visual Studio Code - -```{image} security_checklist/srd_installed_software.png -:alt: SRD installed software -:align: center -``` -
-```` - -#### Check that it's possible to grant and revoke software ingress capability - -- Follow the instructions in the {ref}`Safe Haven Administrator Documentation `: - -```{attention} -{{white_check_mark}} **Verify that:** you can generate a temporary write-only upload token -``` - -```{attention} -{{white_check_mark}} **Verify that:** you can upload software as a non-admin with this token, but write access is revoked after the temporary token has expired -``` - -```{attention} -{{white_check_mark}} **Verify that:** software uploaded by a non-admin can be read by administrators -``` - -```{attention} -{{white_check_mark}} **Verify that:** the **SRE standard user** cannot install software that requires administrator rights (e.g. anything that is installed with `apt`) -``` - -## 10. Software package repositories - -### Turing configuration setting:: - -- {ref}`policy_tier_2`: The user can access any package from our mirrors or via our proxies. They can freely use these packages without restriction. -- {ref}`policy_tier_3`: The user can only access a specific pre-agreed set of packages. They will be unable to download any package not on the allowed list. - -### Implication: - -- {ref}`policy_tier_2`: User can access all packages from PyPI/CRAN -- {ref}`policy_tier_3`: User can only access approved packages from PyPI/CRAN. Allowed list is in `environment_configs/package_lists` - -### Verify by: - -#### {ref}`policy_tier_2`: Download a package that is **not** on the allow list - -- Login as the **SRE standard user** into an SRD via remote desktop web client -- Open up a terminal -- Attempt to install a package on the allowed list that is not included out-of-the-box (for example, try `pip install aero-calc`) - -````{attention} -{{camera}} Verify that: - -
you can install the package - -```{image} security_checklist/srd_pypi_tier2_allow_listed.png -:alt: SRD PyPI Tier 2 -:align: center -``` -
-```` - -- Attempt to install any package that is not on the allowed list (for example, try `pip install awscli`) - -````{attention} -{{camera}} Verify that: - -
you can install the package - -```{image} security_checklist/srd_pypi_tier2_not_allow_listed.png -:alt: SRD PyPI Tier 2 -:align: center -``` -
-```` - -#### {ref}`policy_tier_3`: Download a package on the allow list and one **not** on the allow list - -- Login as the **SRE standard user** into an SRD via remote desktop web client -- Attempt to install a package on the allowed list that is not included out-of-the-box (for example, try `pip install aero-calc`) - -````{attention} -{{camera}} Verify that: - -
you can install the package - -```{image} security_checklist/srd_pypi_tier3_allowed.png -:alt: SRD PyPI Tier 3 -:align: center -``` -
-```` - -- Then attempt to download a package that is not included in the allowed list (for example, try `pip install awscli`) - -````{attention} -{{camera}} Verify that: - -
you cannot install the package - -```{image} security_checklist/srd_pypi_tier3_denied.png -:alt: SRD PyPI Tier 3 -:align: center -``` -
-```` - -## 11. Firewall controls - -### Turing configuration setting: - -- Whilst all user accessible VMs are entirely blocked off from the internet, this is not the case for administrator-only VMs. -- An Azure Firewall governs the internet access provided to these VMs, limiting them mostly to downloading system updates. - -### Implication: - -- An `Azure Firewall` ensures that the administrator VMs have the minimal level of internet access required to function. - -### Verify by: - -#### Admin has limited access to the internet - -- As the {ref}`role_system_manager` use Remote Desktop to connect to the SHM domain controller VM -- Attempt to connect to a non-approved site, such as `www.google.com` - -````{attention} -{{camera}} Verify that: - -
connection fails - -```{image} security_checklist/shmdc_website_deny.png -:alt: SHM DC website denied -:align: center -``` -
-```` - -#### Admin can download Windows updates - -- As the {ref}`role_system_manager` use Remote Desktop to connect to the SHM domain controller VM -- Click on `Start -> Settings-> Update & Security` -- Click the `Download` button - -````{attention} -{{camera}} Verify that: - -
updates download and install successfully - -```{image} security_checklist/shmdc_windows_update.png -:alt: SHM DC update allowed -:align: center -``` -
-```` diff --git a/docs/source/deployment/security_checklist/aad_additional_security_verification.png b/docs/source/deployment/security_checklist/aad_additional_security_verification.png deleted file mode 100644 index eca1e1fe8b..0000000000 Binary files a/docs/source/deployment/security_checklist/aad_additional_security_verification.png and /dev/null differ diff --git a/docs/source/deployment/security_checklist/aad_mfa_approve_signin_request.png b/docs/source/deployment/security_checklist/aad_mfa_approve_signin_request.png deleted file mode 100644 index ac0c3f542c..0000000000 Binary files a/docs/source/deployment/security_checklist/aad_mfa_approve_signin_request.png and /dev/null differ diff --git a/docs/source/deployment/security_checklist/guacamole_srd_desktop.png b/docs/source/deployment/security_checklist/guacamole_srd_desktop.png deleted file mode 100644 index 81453d284a..0000000000 Binary files a/docs/source/deployment/security_checklist/guacamole_srd_desktop.png and /dev/null differ diff --git a/docs/source/deployment/security_checklist/login_no_mfa_guacamole.png b/docs/source/deployment/security_checklist/login_no_mfa_guacamole.png deleted file mode 100644 index 0f5588ca7a..0000000000 Binary files a/docs/source/deployment/security_checklist/login_no_mfa_guacamole.png and /dev/null differ diff --git a/docs/source/deployment/security_checklist/nsg_inbound_access.png b/docs/source/deployment/security_checklist/nsg_inbound_access.png deleted file mode 100644 index 5b3d9fd990..0000000000 Binary files a/docs/source/deployment/security_checklist/nsg_inbound_access.png and /dev/null differ diff --git a/docs/source/deployment/security_checklist/nsg_outbound_access.png b/docs/source/deployment/security_checklist/nsg_outbound_access.png deleted file mode 100644 index 33e4d46960..0000000000 Binary files a/docs/source/deployment/security_checklist/nsg_outbound_access.png and /dev/null differ diff --git a/docs/source/deployment/security_checklist/shmdc_website_deny.png b/docs/source/deployment/security_checklist/shmdc_website_deny.png deleted file mode 100644 index 205634d931..0000000000 Binary files a/docs/source/deployment/security_checklist/shmdc_website_deny.png and /dev/null differ diff --git a/docs/source/deployment/security_checklist/shmdc_windows_update.png b/docs/source/deployment/security_checklist/shmdc_windows_update.png deleted file mode 100644 index 28c524016c..0000000000 Binary files a/docs/source/deployment/security_checklist/shmdc_windows_update.png and /dev/null differ diff --git a/docs/source/deployment/security_checklist/srd_installed_software.png b/docs/source/deployment/security_checklist/srd_installed_software.png deleted file mode 100644 index 477bbb1c1d..0000000000 Binary files a/docs/source/deployment/security_checklist/srd_installed_software.png and /dev/null differ diff --git a/docs/source/deployment/security_checklist/srd_no_curl.png b/docs/source/deployment/security_checklist/srd_no_curl.png deleted file mode 100644 index 1cd79b20e7..0000000000 Binary files a/docs/source/deployment/security_checklist/srd_no_curl.png and /dev/null differ diff --git a/docs/source/deployment/security_checklist/srd_no_internet.png b/docs/source/deployment/security_checklist/srd_no_internet.png deleted file mode 100644 index 58351c6f35..0000000000 Binary files a/docs/source/deployment/security_checklist/srd_no_internet.png and /dev/null differ diff --git a/docs/source/deployment/security_checklist/srd_no_nslookup.png b/docs/source/deployment/security_checklist/srd_no_nslookup.png deleted 
file mode 100644 index c10379c355..0000000000 Binary files a/docs/source/deployment/security_checklist/srd_no_nslookup.png and /dev/null differ diff --git a/docs/source/deployment/security_checklist/srd_no_ssh_by_fqdn.png b/docs/source/deployment/security_checklist/srd_no_ssh_by_fqdn.png deleted file mode 100644 index 776cb7fbe9..0000000000 Binary files a/docs/source/deployment/security_checklist/srd_no_ssh_by_fqdn.png and /dev/null differ diff --git a/docs/source/deployment/security_checklist/srd_no_ssh_by_ip.png b/docs/source/deployment/security_checklist/srd_no_ssh_by_ip.png deleted file mode 100644 index 88555d659d..0000000000 Binary files a/docs/source/deployment/security_checklist/srd_no_ssh_by_ip.png and /dev/null differ diff --git a/docs/source/deployment/security_checklist/srd_pypi_tier2_allow_listed.png b/docs/source/deployment/security_checklist/srd_pypi_tier2_allow_listed.png deleted file mode 100644 index 87b1c0c3f1..0000000000 Binary files a/docs/source/deployment/security_checklist/srd_pypi_tier2_allow_listed.png and /dev/null differ diff --git a/docs/source/deployment/security_checklist/srd_pypi_tier2_not_allow_listed.png b/docs/source/deployment/security_checklist/srd_pypi_tier2_not_allow_listed.png deleted file mode 100644 index 06e9c05d9d..0000000000 Binary files a/docs/source/deployment/security_checklist/srd_pypi_tier2_not_allow_listed.png and /dev/null differ diff --git a/docs/source/deployment/security_checklist/srd_pypi_tier3_allowed.png b/docs/source/deployment/security_checklist/srd_pypi_tier3_allowed.png deleted file mode 100644 index 99a6df163d..0000000000 Binary files a/docs/source/deployment/security_checklist/srd_pypi_tier3_allowed.png and /dev/null differ diff --git a/docs/source/deployment/security_checklist/srd_pypi_tier3_denied.png b/docs/source/deployment/security_checklist/srd_pypi_tier3_denied.png deleted file mode 100644 index ca53773296..0000000000 Binary files a/docs/source/deployment/security_checklist/srd_pypi_tier3_denied.png and /dev/null differ diff --git a/docs/source/deployment/security_checklist/ssh_connection_fail.png b/docs/source/deployment/security_checklist/ssh_connection_fail.png deleted file mode 100644 index 406da2f715..0000000000 Binary files a/docs/source/deployment/security_checklist/ssh_connection_fail.png and /dev/null differ diff --git a/docs/source/deployment/setup_context.md b/docs/source/deployment/setup_context.md new file mode 100644 index 0000000000..fb159b82a7 --- /dev/null +++ b/docs/source/deployment/setup_context.md @@ -0,0 +1,39 @@ +(setup_context)= + +# Set up context + +The 'context' contains some basic metadata about your TRE deployment. +It defines the name of your TRE and the subscription where the supporting resources should be deployed. + +:::{important} +The context **must** be setup before any TRE components can be deployed. +::: + +## Configuration + +A local context configuration file (`context.yaml`) holds the information necessary to find and access a context. + +:::{note} +You can specify the directory where your context configuration (`context.yaml`) is stored by setting the environment variable `DSH_CONFIG_DIRECTORY`. +::: + +## Creating a context + +- You will need to provide some options to set up your DSH context. You can see what these are by running the following: + +:::{code} shell +$ dsh context add --help +::: + +- Run a command like the following to create your local context file. 
+ +:::{code} shell +$ dsh context add --admin-group-name AZURE_SECURITY_GROUP_CONTAINING_ALL_ADMINISTRATORS \ + --name NAME_WITH_ONLY_LETTERS_NUMBERS_AND_UNDERSCORES \ + --description HUMAN_FRIENDLY_DESCRIPTION_OF_YOUR_TRE \ + --subscription-name AZURE_SUBSCRIPTION_NAME +::: + +:::{note} +If you have multiple contexts defined, you can select which context you want to use with `dsh context switch CONTEXT_NAME`. +::: diff --git a/docs/source/deployment/snippets/00_symbols.partial.md b/docs/source/deployment/snippets/00_symbols.partial.md deleted file mode 100644 index 975fbf6e4d..0000000000 --- a/docs/source/deployment/snippets/00_symbols.partial.md +++ /dev/null @@ -1,58 +0,0 @@ -## Explanation of symbols used in this guide - -````{admonition} Powershell command -![Powershell: estimate of time needed](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=estimate%20of%20time%20needed) - -- This indicates a `Powershell` command which you will need to run locally on your machine -- Ensure you have checked out (or downloaded) the appropriate tag of the Safe Haven repository from [https://github.com/alan-turing-institute/data-safe-haven](https://github.com/alan-turing-institute/data-safe-haven). -- Open a `Powershell` terminal and navigate to the indicated directory of your locally checked-out version of the Safe Haven repository -- Ensure that you are logged into Azure by running the `Connect-AzAccount` command - ```{tip} - If your account is a guest in additional Azure tenants, you may need to add the `-Tenant ` flag, where `` is the ID of the Azure tenant you want to deploy into. - ``` -- This command will give you a URL and a short alphanumeric code. -- Go to URL in a web browser, enter the code and log in to your account on Azure. - ```{tip} - If you have several Azure accounts, make sure you use one that has permissions to make changes to the subscription you are using - ``` -```` - -````{admonition} Remote command -![Remote: estimate of time needed](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-onedrive&label=remote&color=blue&message=estimate%20of%20time%20needed) - -- This indicates a command which you will need to run remotely on an Azure virtual machine (VM) using `Microsoft Remote Desktop` -- Open `Microsoft Remote Desktop` and click `Add Desktop` / `Add PC` -- Enter the private IP address of the VM that you need to connect to in the `PC name` field (this can be found by looking in the Azure portal) -- Enter the name of the VM (for example `DC1-SHM-PROJECT`) in the `Friendly name` field -- Click `Add` -- Ensure you are connected to the SHM VPN that you have set up -- Double click on the desktop that appears under `Saved Desktops` or `PCs`. -- Use the `username` and `password` specified by the appropriate section of the guide - -```{tip} -If you see a warning dialog that the certificate cannot be verified as root, accept this and continue. -``` -```` - -```{admonition} Azure Portal operation -![Portal: estimate of time needed](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-azure&label=portal&color=blue&message=estimate%20of%20time%20needed) - -- This indicates an operation which needs to be carried out in the [`Azure Portal`](https://portal.azure.com) using a web browser on your local machine. 
-- You will need to login to the portal using an account with privileges to make the necessary changes to the resources you are altering -``` - -```{admonition} Microsoft Entra ID operation -![Microsoft Entra ID: estimate of time needed](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-academic&label=Microsoft%20Entra%20ID&color=blue&message=estimate%20of%20time%20needed) - -- This indicates an operation which needs to be carried out in the [`Azure Portal`](https://portal.azure.com) using a web browser on your local machine. -- You will need to login to the portal using an account with administrative privileges on the `Microsoft Entra ID` that you are altering. -- Note that this might be different from the account which is able to create/alter resources in the Azure subscription where you are building the Safe Haven. -``` - -```{admonition} OS-dependent steps -The following icons indicate steps that depend on the OS you are using to deploy the SHM - -- ![macOS](https://img.shields.io/badge/-555?&logo=apple&logoColor=white) **MacOS** -- ![Windows](https://img.shields.io/badge/-555?&logo=windows&logoColor=white) **Windows** -- ![Linux](https://img.shields.io/badge/-555?&logo=linux&logoColor=white) **Linux** -``` diff --git a/docs/source/deployment/snippets/01_prerequisites.partial.md b/docs/source/deployment/snippets/01_prerequisites.partial.md deleted file mode 100644 index c644b33052..0000000000 --- a/docs/source/deployment/snippets/01_prerequisites.partial.md +++ /dev/null @@ -1,57 +0,0 @@ -- An `SHM environment` that has already been deployed in Azure - - Follow the {ref}`Safe Haven Management (SHM) deployment guide ` if you have not done so already. -- All {ref}`prerequisites needed for deploying the SHM `. -- An [Azure subscription](https://portal.azure.com) with sufficient credits to build the environment in: we recommend around $1,000 as a reasonable starting point. - - This can be the same or different from the one where the SHM is deployed - - ```{tip} - - Ensure that the **Owner** of the subscription is an `Azure Security group` that contains all administrators and no-one else. - - We recommend using separate `Microsoft Entra IDs` for users and administrators - ``` - -- Access to a **global administrator** account on the SHM Microsoft Entra ID - -### {{beginner}} Software - -- `PowerShell` with support for Azure - - We recommend [installing](https://docs.microsoft.com/en-us/powershell/scripting/install/installing-powershell) the [latest stable release](https://learn.microsoft.com/en-us/powershell/scripting/install/powershell-support-lifecycle?view=powershell-7.4) of Powershell. We have most recently tested deployment using version `7.4.1`. - - Install the [Azure PowerShell Module](https://docs.microsoft.com/en-us/powershell/azure/install-az-ps) using `Install-Module -Name Az -RequiredVersion 5.0.0 -Repository PSGallery` -- `Microsoft Remote Desktop` - - On macOS this can be installed from the [Apple store](https://www.apple.com/app-store/) -- `OpenSSL` - - Install using your package manager of choice - -````{hint} -If you run: - -```powershell -PS> Start-Transcript -Path
-``` - -before you start your deployment and - -```powershell -PS> Stop-Transcript -``` - -afterwards, you will automatically get a full log of the Powershell commands you have run. -```` - -### {{key}} VPN connection to the SHM VNet - -For some operations, you will need to log on to some of the VMs that you deploy and make manual changes. -This is done using the VPN which should have been deployed {ref}`when setting up the SHM environment `. - -### {{name_badge}} SRE domain name - -You will need access to a public routable domain name for the SRE and its name servers. -This can be a subdomain of the Safe Haven Management domain, e.g, `sandbox.project.turingsafehaven.ac.uk`, or a top-level domain (eg. `mydatasafehaven.co.uk` ). - -### {{arrow_double_up}} Deploying multiple SREs in parallel - -```{important} -You can only deploy to **one SRE at a time** from a given computer as the `Az` Powershell module can only work within one Azure subscription at a time. -``` - -If you need to deploy multiple SREs in parallel you will need to use multiple computers. -These can be different physical computers or you can provision dedicated deployment VMs - this is beyond the scope of this guide. diff --git a/docs/source/deployment/snippets/02_configuration.partial.md b/docs/source/deployment/snippets/02_configuration.partial.md deleted file mode 100644 index 32ebec2242..0000000000 --- a/docs/source/deployment/snippets/02_configuration.partial.md +++ /dev/null @@ -1,84 +0,0 @@ -The full configuration details for a new SRE are generated by defining a few "core" properties for the new SRE and the management environment in which it will be deployed. - -### Secure research environment ID - -Choose a short ID `` to identify the secure research environment (e.g. `sandbox`). -This can have a **maximum of seven alphanumeric characters**. - -### {{apple}} SHM configuration properties - -The core properties for the relevant pre-existing Safe Haven Management (SHM) environment must be defined in a JSON file named `shm__core_config.json` in the `environment_configs` folder. -Please {ref}`read the instructions ` to find out what to put in this file. - -### {{green_apple}} SRE configuration properties - -The core properties for the secure research environment (SRE) must be defined in a JSON file named `sre__core_config.json` in the `environment_configs` folder. -The following core SRE properties are required - look in the `environment_configs` folder to see some examples. - -```json -{ - "sreId": "The that you decided on above (eg. 'sandbox').", - "tier": "The data classification tier for the SRE. This controls the outbound network restrictions on the SRE and which mirror set the SRE is peered with", - "shmId": "The that you decided on above (eg. 'testa').", - "subscriptionName": "Azure subscription that the SRE will be deployed into.", - "ipPrefix": "The three octet IP address prefix for the Class A range used by the SRE. See suggestion below on how to set this", - "inboundAccessFrom": "A comma-separated string of IP ranges (addresses or CIDR ranges) from which access to the RDS webclient is permitted. See tip default below for suggestion on how to set this.", - "outboundInternetAccess": "Whether to allow outbound internet access from inside the remote desktop environment. 
Either ('Yes', 'Allow', 'Permit'), ('No', 'Deny', 'Forbid') or 'default' (for Tier 0 and 1 'Allow' otherwise 'Deny')", - "computeVmImage": { - "type": "The name of the SRD image (most commonly 'Ubuntu')", - "version": "The version of the SRD image (e.g. 0.1.2019082900)" - }, - "remoteDesktopProvider": "[Deprecated] Only 'ApacheGuacamole' is supported. If this parameter is not supplied, it will default to 'ApacheGuacamole'", - "azureAdminGroupName": "[Optional] Azure Security Group that admins of this SRE will belong to. If not specified then the same one as the SHM will be used.", - "dataAdminIpAddresses": "A list of one or more IP addresses which admins will be using to transfer sensitive data to/from the secure Azure storage area (if not specified then Turing IP addresses will be used).", - "databases": "[Optional] A list of zero or more database flavours from the following list ('MSSQL', 'PostgreSQL'). For example ['MSSQL', 'PostgreSQL'] would deploy both an MS-SQL and a PostgreSQL database.", - "deploymentIpAddresses": "[Optional] A list of one or more IP addresses which admins will be using when deploying the SRE (if not specified then deployment commands from any IP address will be permitted).", - "domain": "[Optional] The fully qualified domain name for the SRE. If not specified then . will be used.", - "overrides": "[Optional, Advanced] Do not use this unless you know what you're doing! If you want to override any of the default settings, you can do so by creating the same JSON structure that would be found in the final config file and nesting it under this entry. For example, to change the name of the Key Vault secret containing the MSSQL admin password, you could use something like: 'sre: { databases: { dbmssql: { adminPasswordSecretName: my-password-name } } }'" -} -``` - -```{tip} -We recommend the following for the `inboundAccessFrom` setting -- Tier 0/1 SREs: this can be set to `Internet`, allowing access from anywhere. -- Tier 2 SREs: this should correspond to the IP addresses of **organisational networks** (including guest networks) for all approved partner organisations (i.e. specific networks managed by the organisation, such as `EduRoam`, `Turing Guest`, `Turing Secure`) -- Tier 3 SREs: this should correspond to the IP addresses of **restricted networks** for all approved partner organisations. These should only permit connections from within medium security access controlled physical spaces and from managed devices (e.g. `Turing Secure`). -``` - -```{important} -The `ipPrefix` must be unique for each SRE attached to the same SHM. Each SRE needs a range of 2048 IP address (a `/21` range in [CIDR notation](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation)) in a [private IP range](https://en.wikipedia.org/wiki/Private_network#Private_IPv4_addresses). -The config itself expects the first three digits denoting the range (e.g. `"ipPrefix": "10.11.0.0"` rather than `"ipPrefix": "10.11.0.0/21"`) -It is important that the range chosen doesn't overlap with the SHM (by default `10.0.0.0 - 10.0.7.255`), the package repositories (by default `10.10.2.0-10.10.3.255`) or any other SRE. -You may find [this tool](https://www.ipaddressguide.com/cidr) helpful to convert between IP address ranges and CIDRs. -``` - -```{admonition} Alan Turing Institute default -We assign consecutive `/21` ranges starting from `10.11.0.0/21` (ie. the first three SREs will use `10.11.0.0/21`, `10.11.8.0/21` and `10.11.16.0/21`). 
-``` - -### (Optional) Verify code version - -If you have cloned/forked the code from our `GitHub` repository, you can confirm which version of the Data Safe Haven you are currently using by running the following commands: - -![Powershell: a few seconds](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=a%20few%20seconds) - -```powershell -PS> git tag --list | Select-String $(git describe --tags) -``` - -This will check the tag you are using against the list of known tags and print it out. -You can include this confirmation in any record you keep of your deployment. - -### (Optional) {{full_moon}} View full SRE configuration - -A full configuration, which will be used in subsequent steps, will be automatically generated from your core configuration. -Should you wish to, you can print the full SRE config by running the following Powershell command: - -![Powershell: a few seconds](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=a%20few%20seconds) at {{file_folder}} `./deployment` - -```powershell -PS> ./ShowConfigFile.ps1 -shmId -sreId -``` - -- where `` is the {ref}`management environment ID ` for this SHM -- where `` is the {ref}`secure research environment ID ` for this SRE diff --git a/docs/source/deployment/snippets/03_01_remove_data.partial.md b/docs/source/deployment/snippets/03_01_remove_data.partial.md deleted file mode 100644 index 582f124040..0000000000 --- a/docs/source/deployment/snippets/03_01_remove_data.partial.md +++ /dev/null @@ -1,18 +0,0 @@ -```{caution} -If you are redeploying an SRE in the same subscription and did not use the `./SRE_Teardown.ps1` script to clean up the previous deployment, then there may be residual SRE data in the SHM. -``` - -This script will remove any such data. - -```{note} -If the subscription is not empty, confirm that it is not being used before deleting any resources in it. -``` - -![Powershell: a few minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=a%20few%20minutes) at {{file_folder}} `./deployment/secure_research_environment/setup` - -```powershell -PS> ./Remove_SRE_Data_From_SHM.ps1 -shmId -sreId -``` - -- where `` is the {ref}`management environment ID ` for this SHM -- where `` is the {ref}`secure research environment ID ` for this SRE diff --git a/docs/source/deployment/snippets/03_02_register_sre.partial.md b/docs/source/deployment/snippets/03_02_register_sre.partial.md deleted file mode 100644 index 7ad7afee7d..0000000000 --- a/docs/source/deployment/snippets/03_02_register_sre.partial.md +++ /dev/null @@ -1,10 +0,0 @@ -![Powershell: a few minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=a%20few%20minutes) at {{file_folder}} `./deployment/secure_research_environment/setup` - -```powershell -PS> ./Setup_SRE_Key_Vault_And_Users.ps1 -shmId -sreId -``` - -- where `` is the {ref}`management environment ID ` for this SHM -- where `` is the {ref}`secure research environment ID ` for this SRE - -This step will register service accounts with the SHM and also create a Key Vault in the SRE subscription (at `Resource Groups > RG_SHM__SRE__SECRETS > kv--sre-`). 
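Once `Setup_SRE_Key_Vault_And_Users.ps1` has completed, you may wish to confirm that the SRE Key Vault described above exists before moving on. The following is a minimal sketch using the Azure CLI; the vault and resource group names are placeholders following the naming pattern quoted above, not values produced by the deployment scripts.

```shell
# Sketch only: confirm that the SRE secrets Key Vault was created.
# Replace the placeholder names with the values for your own SHM/SRE.
az keyvault show \
    --name "kv-<shm-id>-sre-<sre-id>" \
    --resource-group "RG_SHM_<SHM-ID>_SRE_<SRE-ID>_SECRETS" \
    --query "properties.vaultUri" \
    --output tsv
```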
diff --git a/docs/source/deployment/snippets/04_01_sre_dns.partial.md b/docs/source/deployment/snippets/04_01_sre_dns.partial.md deleted file mode 100644 index f15406f840..0000000000 --- a/docs/source/deployment/snippets/04_01_sre_dns.partial.md +++ /dev/null @@ -1,8 +0,0 @@ -![Powershell: one minute](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=one%20minute) at {{file_folder}} `./deployment/secure_research_environment/setup` - -```powershell -PS> ./Setup_SRE_DNS_Zone.ps1 -shmId -sreId -``` - -- where `` is the {ref}`management environment ID ` for this SHM -- where `` is the {ref}`secure research environment ID ` for this SRE diff --git a/docs/source/deployment/snippets/04_02_manual_dns.partial.md b/docs/source/deployment/snippets/04_02_manual_dns.partial.md deleted file mode 100644 index c2ad04b62f..0000000000 --- a/docs/source/deployment/snippets/04_02_manual_dns.partial.md +++ /dev/null @@ -1,23 +0,0 @@ -````{error} -If you see a message `You need to add the following NS records to the parent DNS system for...` you will need to manually add the specified NS records to the parent's DNS system, as follows: - -
Manual DNS configuration instructions - -![Portal: one minute](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-azure&label=portal&color=blue&message=one%20minute) - -- To find the required values for the NS records on the portal, click `All resources` in the far left panel, search for "DNS Zone" and locate the DNS Zone with SRE's domain. The NS record will list 4 Azure name servers. - ```{image} deploy_shm/shm_subdomain_ns.png - :alt: SHM NS record - :align: center - ``` -- Duplicate these records to the parent DNS system as follows: - - If the parent domain has an Azure DNS Zone, create an NS record set in this zone. - - The name should be set to the subdomain (e.g. `sandbox` ) or `@` if using a custom domain, and the values duplicated from above. - - For example, for a new subdomain `sandbox.testa.dsgroupdev.co.uk` , duplicate the NS records from the Azure DNS Zone `sandbox.testa.dsgroupdev.co.uk` to the Azure DNS Zone for `testa.dsgroupdev.co.uk`, by creating a record set with name `sandbox`. - ```{image} deploy_sre/sre_subdomain_ns.png - :alt: SRE NS record - :align: center - ``` - - If the parent domain is outside of Azure, create NS records in the registrar for the new domain with the same value as the NS records in the new Azure DNS Zone for the domain. -
-```` diff --git a/docs/source/deployment/snippets/04_03_deploy_vnet.partial.md b/docs/source/deployment/snippets/04_03_deploy_vnet.partial.md deleted file mode 100644 index 8f94284480..0000000000 --- a/docs/source/deployment/snippets/04_03_deploy_vnet.partial.md +++ /dev/null @@ -1,12 +0,0 @@ -![Powershell: five minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=five%20minutes) at {{file_folder}} `./deployment/secure_research_environment/setup` - -```powershell -PS> ./Setup_SRE_Networking.ps1 -shmId -sreId -``` - -- where `` is the {ref}`management environment ID ` for this SHM -- where `` is the {ref}`secure research environment ID ` for this SRE - -```{note} -The VNet peerings may take a few minutes to provision after the script completes. -``` diff --git a/docs/source/deployment/snippets/05_storage_accounts.partial.md b/docs/source/deployment/snippets/05_storage_accounts.partial.md deleted file mode 100644 index 7a9f88ba81..0000000000 --- a/docs/source/deployment/snippets/05_storage_accounts.partial.md +++ /dev/null @@ -1,10 +0,0 @@ -![Powershell: ten minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=ten%20minutes) at {{file_folder}} `./deployment/secure_research_environment/setup` - -```powershell -PS> ./Setup_SRE_Storage_Accounts.ps1 -shmId -sreId -``` - -- where `` is the {ref}`management environment ID ` for this SHM -- where `` is the {ref}`secure research environment ID ` for this SRE - -This script will create a storage account in the `RG_SHM__PERSISTENT_DATA` resource group, a corresponding private end point in `RG_SRE__NETWORKING` and will configure the DNS zone of the storage account to the right IP address. diff --git a/docs/source/deployment/snippets/06_01_create_user_account.partial.md b/docs/source/deployment/snippets/06_01_create_user_account.partial.md deleted file mode 100644 index 5b94280c6d..0000000000 --- a/docs/source/deployment/snippets/06_01_create_user_account.partial.md +++ /dev/null @@ -1,50 +0,0 @@ -These steps ensure that you have created a non-privileged user account that you can use for testing. -You must ensure that you have assigned a licence to this user in the Microsoft Entra ID so that MFA will work correctly. - -You should have already set up a non-privileged user account upon setting up the SHM, when {ref}`validating the active directory synchronisation `, but you may wish to set up another or verify that you have set one up already: - -
-Set up a non-privileged user account - -![Remote: five minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-onedrive&label=remote&color=blue&message=five%20minutes) - -- Log into the **SHM primary domain controller** (`DC1-SHM-`) VM using the connection details that you previously used to {ref}`log into this VM `. -- Follow the user creation instructions from the {ref}`SHM deployment guide ` (everything under the `Validate Active Directory synchronisation` header). In brief these involve: - - adding your details (ie. your first name, last name, phone number etc.) to a user details CSV file. - - running `C:\Installation\CreateUsers.ps1 ` in a Powershell command window with elevated privileges. -- This will create a user in the local Active Directory on the SHM domain controller and start the process of synchronisation to the Azure Active Directory, which will take around 5 minutes. - -
- -
-Ensure that your non-privileged user account is in the correct Security Group - -![Remote: five minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-onedrive&label=remote&color=blue&message=five%20minutes) - -- Log into the **SHM primary domain controller** (`DC1-SHM-`) VM using the connection details that you previously used to {ref}`log into this VM `. -- In Server Manager click `Tools > Active Directory Users and Computers` -- In `Active Directory Users and Computers`, expand the domain in the left hand panel click `Safe Haven Security Groups` -- Right click the `SG Research Users` security group and select `Properties` -- Click on the `Members` tab. -- If your user is not already listed here you must add them to the group - - Click the `Add` button - - Enter the start of your username and click `Check names` - - Select your username and click `Ok` - - Click `Ok` again to exit the `Add users` dialogue -- Synchronise with Microsoft Entra ID by running following the `Powershell` command on the SHM primary domain controller - -```powershell -PS> C:\Installation\Run_ADSync.ps1 -``` - -### {{closed_lock_with_key}} Ensure that your non-privileged user account has MFA enabled - -Switch to your custom Microsoft Entra ID in the Azure portal and make the following checks: - -![Microsoft Entra ID: one minute](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-academic&label=Microsoft%20Entra%20ID&color=blue&message=one%20minute) - -- From the Azure portal, navigate to the Microsoft Entra ID you have created. -- The `Usage Location` must be set in Microsoft Entra ID (should be automatically synchronised from the local Active Directory if it was correctly set there) - - Navigate to `Microsoft Entra ID > Manage / Users > (user account)`, and ensure that `Settings > Usage Location` is set. -- A licence must be assigned to the user. - - Navigate to `Microsoft Entra ID > Manage / Users > (user account) > Licenses` and verify that a license is assigned and the appropriate MFA service enabled. 
diff --git a/docs/source/deployment/snippets/07_deploy_webapps.partial.md b/docs/source/deployment/snippets/07_deploy_webapps.partial.md deleted file mode 100644 index edf1e35b79..0000000000 --- a/docs/source/deployment/snippets/07_deploy_webapps.partial.md +++ /dev/null @@ -1,8 +0,0 @@ -![Powershell: fifty minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=fifty%20minutes) at {{file_folder}} `./deployment/secure_research_environment/setup` - -```powershell -PS> ./Setup_SRE_WebApp_Servers.ps1 -shmId -sreId -``` - -- where `` is the {ref}`management environment ID ` for this SHM -- where `` is the {ref}`secure research environment ID ` for this SRE diff --git a/docs/source/deployment/snippets/08_databases.partial.md b/docs/source/deployment/snippets/08_databases.partial.md deleted file mode 100644 index 669ab29fac..0000000000 --- a/docs/source/deployment/snippets/08_databases.partial.md +++ /dev/null @@ -1,15 +0,0 @@ -![Powershell: up to seventy minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=up%20to%20seventy%20minutes) at {{file_folder}} `./deployment/secure_research_environment/setup` - -```powershell -PS> ./Setup_SRE_Databases.ps1 -shmId -sreId -``` - -- where `` is the {ref}`management environment ID ` for this SHM -- where `` is the {ref}`secure research environment ID ` for this SRE - -This will deploy any databases that you specified in the core config file. The time taken will depend on which (if any) databases you chose. - -```{important} -- The deployment of an `MS-SQL` database will take **around 60 minutes** to complete. -- The deployment of a `PostgreSQL` database will take **around 10 minutes** to complete. -``` diff --git a/docs/source/deployment/snippets/09_single_srd.partial.md b/docs/source/deployment/snippets/09_single_srd.partial.md deleted file mode 100644 index d6143858da..0000000000 --- a/docs/source/deployment/snippets/09_single_srd.partial.md +++ /dev/null @@ -1,24 +0,0 @@ -![Powershell: ten minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=ten%20minutes) at {{file_folder}} `./deployment/secure_research_environment/setup` - -```powershell -PS> ./Add_Single_SRD.ps1 -shmId -sreId -ipLastOctet [-vmSize ] -``` - -- where `` is the {ref}`management environment ID ` for this SHM -- where `` is the {ref}`secure research environment ID ` for this SRE -- where `` is last octet of the IP address -- [optional] where `` is the [Azure VM size](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes) for this SRD - -This will deploy a new SRD into the SRE environment. - -```{tip} -If this SRE needs additional software or settings that are not in your default VM image, you can create a custom cloud init file on your **deployment machine**. - -- By default, SRD deployments will use the `cloud-init-srd.mustache.yaml` configuration file in the `deployment/secure_research_environment/cloud_init/` folder. This does all the necessary steps to configure the VM to work with LDAP. -- If you require additional steps to be taken at deploy time while the VM still has access to the internet (e.g. to install some additional project-specific software), copy the default cloud init file to a file named `cloud-init-srd-shm--sre-.mustache.yaml` in the same folder and add any additional required steps in the `SRE-SPECIFIC COMMANDS` block marked with comments. 
-``` - -```{admonition} Alan Turing Institute default -- CPU-based VMs are deployed with the next unused last octet in the range `160` to `179` -- GPU-based VMs are deployed with the next unused last octet in the range `180` and `199` -``` diff --git a/docs/source/deployment/snippets/10_network_lockdown.partial.md b/docs/source/deployment/snippets/10_network_lockdown.partial.md deleted file mode 100644 index 6d53845061..0000000000 --- a/docs/source/deployment/snippets/10_network_lockdown.partial.md +++ /dev/null @@ -1,10 +0,0 @@ -![Powershell: ten minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=ten%20minutes) at {{file_folder}} `./deployment/secure_research_environment/setup` - -```powershell -PS> ./Apply_SRE_Network_Configuration.ps1 -shmId -sreId -``` - -- where `` is the {ref}`management environment ID ` for this SHM -- where `` is the {ref}`secure research environment ID ` for this SRE - -This will apply the locked-down network settings which will restrict access into/out of this SRE. diff --git a/docs/source/deployment/snippets/11_configure_firewall.partial.md b/docs/source/deployment/snippets/11_configure_firewall.partial.md deleted file mode 100644 index ea497a27e4..0000000000 --- a/docs/source/deployment/snippets/11_configure_firewall.partial.md +++ /dev/null @@ -1,10 +0,0 @@ - - -![Powershell: a few minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=a%20few%20minutes) at {{file_folder}} `./deployment/secure_research_environment/setup` - -```powershell -PS> ./Setup_SRE_Firewall.ps1 -shmId -sreId -``` - -- where `` is the {ref}`management environment ID ` for this SHM -- where `` is the {ref}`secure research environment ID ` for this SRE diff --git a/docs/source/deployment/snippets/12_configure_monitoring.partial.md b/docs/source/deployment/snippets/12_configure_monitoring.partial.md deleted file mode 100644 index 88c26b4d87..0000000000 --- a/docs/source/deployment/snippets/12_configure_monitoring.partial.md +++ /dev/null @@ -1,19 +0,0 @@ -![Powershell: thirty minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=thirty%20minutes) at {{file_folder}} `./deployment/secure_research_environment/setup` - -```powershell -PS> ./Setup_SRE_Monitoring.ps1 -shmId -sreId -``` - -- where `` is the {ref}`management environment ID ` for this SHM -- where `` is the {ref}`secure research environment ID ` for this SRE - -````{error} -As installing the logging agent can take several minutes, it is possible that some of the commands run in this script might time out. -The script should automatically retry any that fail, if you see any failure messages, please re-run: - -```powershell -PS> ./Setup_SRE_Monitoring.ps1 -shmId $shmId -sreId $sreId -``` - -this will attempt to install the extensions again, skipping any VMs that already have the extensions installed. 
-```` diff --git a/docs/source/deployment/snippets/13_enable_backup.partial.md b/docs/source/deployment/snippets/13_enable_backup.partial.md deleted file mode 100644 index 7421f453d0..0000000000 --- a/docs/source/deployment/snippets/13_enable_backup.partial.md +++ /dev/null @@ -1,10 +0,0 @@ -![Powershell: a few minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=a%20few%20minutes) at {{file_folder}} `./deployment/secure_research_environment/setup` - -```powershell -PS> ./Setup_SRE_Backup.ps1 -shmId -sreId -``` - -- where `` is the {ref}`management environment ID ` for this SHM -- where `` is the {ref}`secure research environment ID ` for this SRE - -This will enable regular backups for the persistent data storage accounts, both ingress and egress data. diff --git a/docs/source/deployment/snippets/14_run_smoke_tests.partial.md b/docs/source/deployment/snippets/14_run_smoke_tests.partial.md deleted file mode 100644 index 4bc8bca0b5..0000000000 --- a/docs/source/deployment/snippets/14_run_smoke_tests.partial.md +++ /dev/null @@ -1,13 +0,0 @@ -These tests should be run **after** the network lock down and peering the SRE and package mirror VNets. -They are automatically uploaded to the SRD during the deployment step. - -![Remote: five minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-onedrive&label=remote&color=blue&message=five%20minutes) - -- Use the remote desktop interface at `https://.` to log in to the **SRD** (`SRE---`) that you have deployed using the scripts above -- Open a terminal session -- Enter the test directory using `cd /opt/tests` -- Run `bats run_all_tests.bats`: - - if any of the tests fail, check the `README.md` in this folder for help in diagnosing the issues -- Copy `tests/test_jupyter.ipynb` to your home directory - - activate each of the available Python versions in turn - - run `jupyter notebook` in each case and check that you can run the notebook and that all versions and paths match throughout. See [Available Python and R versions](/roles/researcher/user_guide.md#available-python-and-r-versions) diff --git a/docs/source/deployment/snippets/user_csv_format.partial.md b/docs/source/deployment/snippets/user_csv_format.partial.md deleted file mode 100644 index 42940bf20d..0000000000 --- a/docs/source/deployment/snippets/user_csv_format.partial.md +++ /dev/null @@ -1,51 +0,0 @@ -- Make a new copy of the user details template file from `C:\Installation\user_details_template.csv` - - ```{tip} - We suggest naming this `YYYYDDMM-HHMM_user_details.csv` but this is up to you - ``` - -- Remove the example user and add the required details for each user - - - `SamAccountName`: Log in username **without** the `@` part. - - ```{tip} - We recommend using `firstname.lastname` format. - ``` - - ```{warning} - Ensure that `SamAccountName` has a maximum of **20 characters** from the 7-bit ASCII set (unaccented letters, numbers and some punctuation) or synchronisation will fail. - ``` - - - `GivenName`: User's first / given name - - ```{warning} - Ensure that `GivenName` uses only characters from the 7-bit ASCII set (unaccented letters, numbers and some punctuation) or synchronisation will fail. - ``` - - - `Surname`: User's last name / surname - - ```{warning} - Ensure that `Surname` uses only characters from the 7-bit ASCII set (unaccented letters, numbers and some punctuation) or synchronisation will fail. - ``` - - - `Mobile`: Phone number to use for initial password reset. 
- - ```{important} - - This must include country code in the format `+ ` (e.g. `+44 7123456789`). - - Include a space between the country code and local number parts but no other spaces. - - Remove the leading `0` from local number if present. - - This can be a landline or or mobile but must be accessible to the user when resetting their password and setting up MFA. - - Users can add the authenticator app and/or additional phone numbers during MFA self-registration. - ``` - - - `SecondaryEmail`: An existing organisational email address for the user. - - ```{note} - This is **not** uploaded to their Data Safe Haven user account but is needed when sending account activation messages. - ``` - - - `GroupName`: The name of the {ref}`security_groups` that the users should be added to. Users will need to be added to the relevant security group before they can access an SRE. You can also change this manually later on (see {ref}`adding_users_manually`). - - ```{tip} - If the user needs to be added to multiple groups, separate them with a pipe-character ( `|` ). - ``` diff --git a/docs/source/design/architecture/architecture_mirrors.png b/docs/source/design/architecture/architecture_mirrors.png deleted file mode 100644 index e5fe16c2e8..0000000000 Binary files a/docs/source/design/architecture/architecture_mirrors.png and /dev/null differ diff --git a/docs/source/design/architecture/index.md b/docs/source/design/architecture/index.md deleted file mode 100644 index bf8802d8f8..0000000000 --- a/docs/source/design/architecture/index.md +++ /dev/null @@ -1,35 +0,0 @@ -(design_architecture)= - -# Architecture - -```{toctree} -:hidden: true -:maxdepth: 2 - -shm_details.md -sre_details.md -``` - -The Data Safe Haven is designed to be deployed on the [Microsoft Azure](https://azure.microsoft.com/en-gb/) platform taking advantage of its cloud-computing infrastructure. - -Each deployment of the Data Safe Haven consists of two components: - -- one **Safe Haven Management** (SHM) component -- one or more **Secure Research Environments** (SREs). - -```{image} safe_haven_architecture.png -:alt: High-level architecture -:align: center -``` - -The SHM controls the authentication process for the infrastructure. -The identity provider is Microsoft Active Directory, which is synchronized with Microsoft Entra ID to provide cloud and multifactor authentication into the individual project Secure Research Environment (SRE). - -The SHM is connected to each SRE through virtual network peering, which allows authentication requests from the SRE servers to be resolved by the SHM Active Directory. -Although all SREs are peered with the SHM, they are not able to connect directly to one another, ensuring the isolation of each project. 
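As a purely illustrative aside, the hub-and-spoke peering described above can be inspected with the Azure CLI. The sketch below lists the peerings configured on an SRE virtual network; the resource group name follows the `RG_SRE_<SRE-ID>_NETWORKING` pattern that appears in the deployment snippets, while the VNet name is a placeholder.

```shell
# Sketch only: list the peerings attached to an SRE virtual network.
# Only the peering to the SHM VNet should appear, since SREs are never
# peered with one another.
az network vnet peering list \
    --resource-group "RG_SRE_<SRE-ID>_NETWORKING" \
    --vnet-name "<sre-vnet-name>" \
    --output table
```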
- -[Safe Haven Management (SHM)](shm_details.md) -: details about the design of the SHM component - -[Secure Research Environment (SRE)](sre_details.md) -: details about the design of the SRE component diff --git a/docs/source/design/architecture/safe_haven_architecture.png b/docs/source/design/architecture/safe_haven_architecture.png deleted file mode 100644 index 2da6360737..0000000000 Binary files a/docs/source/design/architecture/safe_haven_architecture.png and /dev/null differ diff --git a/docs/source/design/architecture/shm_architecture.png b/docs/source/design/architecture/shm_architecture.png deleted file mode 100644 index 77b2acdab2..0000000000 Binary files a/docs/source/design/architecture/shm_architecture.png and /dev/null differ diff --git a/docs/source/design/architecture/shm_details.md b/docs/source/design/architecture/shm_details.md deleted file mode 100644 index 49ca48d487..0000000000 --- a/docs/source/design/architecture/shm_details.md +++ /dev/null @@ -1,69 +0,0 @@ -# Safe Haven Management design - -The user and machine management services for the Safe Haven are hosted on a separate subscription to any of the secure project environments. -These services include the primary authentication providers, VM images, package mirrors and proxies, and other services that contribute to the smooth running of the service. -This provides a centralised management facility, ensuring consistency across all secure project environments. - -## Architecture - -```{image} shm_architecture.png -:alt: Safe Haven Management (SHM) component -:align: center -``` - -Within the Management segment all authentication services are contained within a single virtual network (VNet). -The Windows Servers are running Active Directory and are acting as Domain Controllers. -They are configured within an Azure availability set to ensure maximum up time. -The Domain Controllers synchronise user details to the Microsoft Entra ID that is associated with the Management subscription to support self-service account activation and password reset. - -Network security is provided by Azure Network Security Groups that ensure that inbound connections from the SREs are limited to Active Directory and RADIUS traffic. - -For management of the environment there is an Azure point-to-site (P2S) VPN service configured. -Access to this is limited to the Data Safe Haven administration team. - -## Package proxies and mirrors - -For {ref}`policy_tier_2` and {ref}`policy_tier_3` SREs, a selection of Python and R packages can be provided via a set of servers that provide access to some or all of the packages available from the public PyPI or CRAN package repositories. -Depending on the configuration of the Data Safe Haven, these packages can be provided via either package proxies or package mirrors. - -- **Package proxies:** These make on-demand connections to the public package repositories for approved packages. The proxy server has access to all packages available on the upstream repositories but limits downstream access to the packages permitted for an SRE. -- **Package mirrors:** These synchronise approved packages through a one-way airlock. This provides an additional level of security compared to package proxies. However, the mirrors must be regularly updated, which can take a long time when mirroring a large amount of package data. - -{ref}`policy_tier_2` environments can access all packages available from the public PyPI and CRAN repositories. 
-{ref}`policy_tier_3` environments can access only a pre-approved subset of PyPI or CRAN packages. -Connection of SREs to either the appropriate proxy or mirror servers is achieved via VNet peering. -While multiple SREs may be peered to the same proxy or mirror VNet, this does not permit communication between SREs. - -### Package proxy security - -When using a package proxy, the proxy server has very restricted outbound internet access. -It is only able to connect to the download endpoints for the upstream package repositories it is providing access to (in our case PyPI for Python packages and CRAN for R packages). -While the proxy server has access to all packages from the upstream repositories, it can be configured to only provide access to a pre-approved subset of these packages and proxy servers for {ref}`policy_tier_3` environments are configured this way. - -If the proxy server was compromised from an SRE, an attacker could access all packages available on the upstream package repositories. -In the Turing's configuration, this would not provide any additional access for {ref}`policy_tier_2` environments, but would allow an attacker from a {ref}`policy_tier_3` environment to install packages not on the limited pre-approved package list. -However, the upstream package repository download endpoints do not allow any data to be uploaded to them, so an attacker would have to separately compromise the upstream package repositories to be able to egress any data through a compromised package proxy. - -### Package mirror security - -Package mirrors provide an additional level of isolation at the network level, deploying two paired mirror servers for each upstream package repository. - -```{image} architecture_mirrors.png -:alt: Package mirrors -:align: center -``` - -The "external" mirror is protected by infrastructure level network controls that do not permit any **inbound** connections at all (not even from the "internal" mirror). -It can call out to the internet to update itself from the official package server and can push updates to the "internal" mirror via rsync. - -The "internal" mirror is protected by infrastructure level network controls that do not permit any **outbound** connections at all (not even to the "external" mirror). -It can receive inbound rsync connections from the "external" mirror to receive package updates and inbound connections from its associated SREs to install packages. - -Between them this pair of servers operate as a one-way "airlock" to allow packages to be pulled from the official package server on the internet and made available to Secure Research Desktops (SRDs) in project SREs, while not permitting the mirrors to be used to connect to the internet from a SRE. -Even if the "internal" mirror was to be compromised, the infrastructure level network controls restricting outbound connections would prevent access to any other resources within the Data Safe Haven or outside of it. - -### Package mirror performance considerations - -As package mirrors need to copy **all** permitted packages, synchronising a large amount of package data can take a long time. -In particular, a full initial download of all packages from PyPI for a {ref}`policy_tier_2` mirror can take two weeks. -Newly approved packages will also only be available after the next mirror synchronisation is complete. 
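To make the one-way "airlock" between the paired mirror servers more concrete, the push from the "external" to the "internal" mirror is an rsync job along the lines of the sketch below. The hostname and paths are invented for illustration and are not the values used by the deployment scripts.

```shell
# Sketch only: one-way push of mirrored packages from the "external"
# mirror to the "internal" mirror. The internal mirror accepts this
# inbound rsync but is blocked from making any outbound connections,
# so packages can only flow in one direction.
rsync --archive --compress --delete \
    /datadrive/mirrors/pypi/ \
    mirror-internal:/datadrive/mirrors/pypi/
```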
diff --git a/docs/source/design/architecture/sre_architecture.png b/docs/source/design/architecture/sre_architecture.png deleted file mode 100644 index 4337f84318..0000000000 Binary files a/docs/source/design/architecture/sre_architecture.png and /dev/null differ diff --git a/docs/source/design/architecture/sre_details.md b/docs/source/design/architecture/sre_details.md deleted file mode 100644 index 03adf9b945..0000000000 --- a/docs/source/design/architecture/sre_details.md +++ /dev/null @@ -1,35 +0,0 @@ -(design_sre)= - -# Secure research environment design - -Each project within the Safe Haven has its own separate Secure Research Environment (SRE). -Each of these SREs is completely isolated from the others and the security configuration of each SRE is set independently to match the needs of the project and Data Provider(s). -Each SRE is assigned to a security tier, depending on the sensitivity of the dataset(s) it will hold and the nature of the analysis to be performed within it. -Each tier is associated with a baseline security configuration, individual elements of which can be adjusted to meet the needs of the project and the Data Provider(s). - -The infrastructure for each SRE is hosted in a separate Azure resource group. -This infrastructure comprises: - -- One or more Secure Research Desktops (SRDs) with a range of data science software preinstalled -- A file server to host the project data -- A `Gitlab` server to provide source code management and version control -- A `CodiMD` server for collaborative writing -- `Apache Guacamole` provides a clientless remote desktop gateway to provide secure remote desktop access to the SRE resources. - -Hosting each secure project environment in its own resource group supports a clean lifecycle management process, making it easy to verifiably delete all project data and resources at the end of a project. - -## Architecture - -```{image} sre_architecture.png -:alt: Secure Research Environment (SRE) component -:align: center -``` - -The SREs use `Apache Guacamole` to provide a secure connection to the resources within the environment. -Only the remote desktop server is accessible from outside the SRE. -Connections to this are made via an SSL/TLS secured connection that requires the user to authenticate using credentials provided by the Data Safe Haven and validated with MFA. - -For {ref}`policy_tier_2` environments and above, outbound internet access from within the SRE is blocked. Access to the SRE remote desktop is only permitted from approved networks, and the remote desktop server prevents the copying of information into or out of the SRE. - -For management of the environment there is an Azure point-to-site (P2S) VPN service configured via the [Safe Haven Management (SHM)](shm_details.md) segment of the Data Safe Haven. -Access to this is limited to the Data Safe Haven administration team. 
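Because everything belonging to an SRE lives in its own resource group(s), end-of-project teardown reduces to deleting those groups. The sketch below illustrates this with the Azure CLI for a single group; in practice the `./SRE_Teardown.ps1` script is the supported route, not least because residual SRE data in the SHM also needs cleaning up.

```shell
# Sketch only: deleting a resource group removes every resource inside it.
# The group name is a placeholder; repeat for each of the SRE's groups.
az group delete --name "<sre-resource-group>" --yes --no-wait
```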
diff --git a/docs/source/design/index.md b/docs/source/design/index.md index e49130d413..26b6fde9fe 100644 --- a/docs/source/design/index.md +++ b/docs/source/design/index.md @@ -1,12 +1,11 @@ # Design -```{toctree} +:::{toctree} :hidden: true :maxdepth: 2 -architecture/index.md security/index.md -``` +::: ## Decisions and constraints @@ -18,8 +17,5 @@ In addition to describing the architecture and technical security controls confi Each organisation deploying their own instance of the Data Safe Haven is responsible for verifying their Data Safe Haven instance is deployed and operates as expected and that the deployed configuration effectively supports the purpose for which they have deployed the Data Safe Haven and their own information governance policies and processes. ``` -[Architecture](architecture/index.md) -: How the Data Safe Haven infrastructure is designed. - [Security](security/index.md) : Details about the security configuration used at the Alan Turing Institute diff --git a/docs/source/design/security/index.md b/docs/source/design/security/index.md index 873fa63577..033880cc4c 100644 --- a/docs/source/design/security/index.md +++ b/docs/source/design/security/index.md @@ -1,14 +1,13 @@ # Security -```{toctree} +:::{toctree} :hidden: true :maxdepth: 2 objectives.md technical_controls.md -software_package_approval.md reference_configuration.md -``` +::: [Security objectives](objectives.md) : Security objectives that the Data Safe Haven is trying to achieve @@ -16,8 +15,5 @@ reference_configuration.md [Built-in technical controls](technical_controls.md) : Default technical controls built-in to the Data Safe Haven -[Software package approval](software_package_approval.md) -: Policy and process for adding packages to the _core_ allowlists - [Turing security configuration](reference_configuration.md) : Security configuration used at The Alan Turing Institute to try to meet these objectives diff --git a/docs/source/design/security/reference_configuration.md b/docs/source/design/security/reference_configuration.md index 8e8a751bb3..5d508cdebd 100644 --- a/docs/source/design/security/reference_configuration.md +++ b/docs/source/design/security/reference_configuration.md @@ -35,7 +35,7 @@ The set of controls applied at the Alan Turing Institute are discussed here, tog ### Turing configuration setting: - Managed devices must be provided by an approved organisation and the user must not have administrator access to them. -- Network rules for higher tier environments permit access only from IP ranges corresponding to `Restricted` networks that only permit managed devices to connect. +- Network rules for higher tier environments permit access only from IP ranges corresponding to **Restricted** networks that only permit managed devices to connect. ### Implication: @@ -49,7 +49,7 @@ The set of controls applied at the Alan Turing Institute are discussed here, tog - Medium security research spaces control the possibility of unauthorised viewing. - Card access or other means of restricting entry to only known researchers (such as the signing in of guests on a known list) is required. - Screen adaptations or desk partitions should be adopted in open-plan spaces if there is a high risk of unauthorised people viewing the user's screen. -- Firewall rules for the SREs only permit access from `Restricted` network IP ranges corresponding to these research spaces. +- Firewall rules for the SREs only permit access from **Restricted** network IP ranges corresponding to these research spaces. 
### Implication: @@ -103,7 +103,7 @@ To minimise the risk of unauthorised access to the dataset while the ingress vol ### Turing configuration setting:: - Users can write to the `/output` volume. -- A {ref}`role_system_manager` can view and download data in the `/output` volume via `Azure Storage Explorer`. +- A {ref}`role_system_manager` can view and download data in the `/output` volume via **Azure Storage Explorer**. ### Implication: @@ -147,7 +147,7 @@ To minimise the risk of unauthorised access to the dataset while the ingress vol ### Turing configuration setting: -- An `Azure Firewall` ensures that all VMs within the safe haven have the minimal level of internet access required to function. +- An **Azure Firewall** ensures that all VMs within the safe haven have the minimal level of internet access required to function. ### Implication: diff --git a/docs/source/design/security/software_package_approval.md b/docs/source/design/security/software_package_approval.md deleted file mode 100644 index cf14f4ea6b..0000000000 --- a/docs/source/design/security/software_package_approval.md +++ /dev/null @@ -1,78 +0,0 @@ -# Software approval policy - -To maximise the usability of the secure research environments, we pre-install certain software packages that are deemed broadly useful to a cross section of researchers, thus making them available to all users. - -Other software packages which are only of interest to a subset of users can be made available for download from remote package repositories. -Currently, we support PyPI (Python) and CRAN (R) as remote repositories. - -For higher {ref}`sensitivity tiers ` ({ref}`policy_tier_3` and above), only a subset of packages are made available in this way. -This subset of packages constitutes an "allowlist" of packages that have been vetted to mitigate the risk of introducing malicious or unsound software into the secure environment. - -```{warning} -The Data Safe Haven team manages a default allowlist, but individual deployments may be using their own versions. -Check with your {ref}`role_system_manager` what is being used in your case -``` - -This page sets out the policy for adding software packages to the default allowlist and/or software to the pre-installed software list. -It also describes the procedure that users of the secure environment should follow to request new additions to the allowlist. - -## Background - -Given the safeguards afforded by the safe haven environment, and the separation of responsibilities between its constituent resources, the level of risk associated with the introduction of third party software packages is considered low. -Moreover, access to the environment is carefully controlled and there is a presumption of trust in the individual researchers to whom access is granted. - -Nevertheless, the introduction of any software into the safe haven must be considered against the potential risks of: - -- approved users having access to data to which they shouldn't (e.g. from data mixing) -- unapproved users having access to data (e.g. from a data breach) -- poisoning of data and/or outputs -- resource misuse (allocation of computational resources for unintended or wasteful purposes). - -Such risks may originate unwittingly, from a user who wants to "just get the job done", or from a user, network team member or administrator acting maliciously. 
- -Specific risks which this policy aims to mitigate include: - -- package name squatting (allowlisting a similarly-named package instead of the intended one) -- privilege escalation attacks (enabling a user to gain elevated access permissions) -- unauthorised data ingress (in particular, it is possible to upload arbitrary data to PyPI without review) - -(package_inclusion_policy)= - -## Policy - -- For each supported repository, three package lists will be maintained: - - a core allowlist of broadly useful packages that should be pre-installed in each environment - - an extra allowlist of packages that may be useful for specific projects - - an expanded list to be made available from the package repositories consisting of the core and extra packages plus their dependencies -- Users may request to add packages to these allowlists via the {ref}`package request procedure `. - - In the interests of improving researcher productivity the aim will be to accommodate such requests, provided there are no outweighing security concerns associated with the package or its dependencies. -- Requests will be reviewed by the project team using the information provided by the user when making the request -- If approved, a requested package will be added to either the core or extra allowlist (as appropriate) - -(package_inclusion_criteria)= - -### Criteria for inclusion in core - -Only software that is considered broadly useful to a cross section of researchers should be included in core. - -To meet this condition, a package should: - -- implement at least one generic (i.e. not domain-specific) statistical algorithm or method, or -- provide support for a cross-cutting analysis technique (e.g. geospatial data analysis, NLP), or -- facilitate data science or software development best practices (e.g. for robustness, correctness, reproducibility), or -- enhance the presentational features of the programming language (e.g. for producing plots, notebooks, articles, websites), or -- enhance the usability of the programming language or development environment (e.g. RStudio, PyCharm) - -(package_request_procedure)= - -## Package request/review procedure - -- A user requests a package by opening a `Software package request` issue on the Data Safe Haven GitHub repository, including responses to the following questions: - - Is this package the mostly widely supported for the intended purpose? - - What will you be able to do with this package that you can't currently do? What alternatives are there? - - What risks to data integrity/security might arise from including this package or its dependencies? -- A member of the project team reviews the request according to the terms of the {ref}`package_inclusion_policy`. -- The reviewer adds their decision (accept/reject) to the issue and notifies the user who made the request. - - If the decision is to reject, the reviewer must include an explanation. Any subsequent request for the same package should address the specific concern raised. - - If the decision is to accept, a pull request should be made that will add the package to the appropriate list. -- Once the pull request is approved, system administrators of any running deployment can decide whether to update to the new allowlist definitions. 
diff --git a/docs/source/design/security/technical_controls.md b/docs/source/design/security/technical_controls.md index 83065079c6..404e269375 100644 --- a/docs/source/design/security/technical_controls.md +++ b/docs/source/design/security/technical_controls.md @@ -109,6 +109,6 @@ Note that this means that eg. password managers cannot be used to autofill a {re ### Python/R package availability: -- **{ref}`policy_tier_3`:** A pre-agreed allowlist of packages from `CRAN` and `PyPI` (via proxy or local mirror). -- **{ref}`policy_tier_2`:** Anything on `CRAN` or `PyPI` (via proxy or local mirror). +- **{ref}`policy_tier_3`:** A pre-agreed allowlist of packages from **CRAN** and **PyPI** (via proxy or local mirror). +- **{ref}`policy_tier_2`:** Anything on **CRAN** or **PyPI** (via proxy or local mirror). - **{ref}`policy_tier_0` and {ref}`policy_tier_1`:** Direct access to any package repository. diff --git a/docs/source/index.md b/docs/source/index.md index 2021189f67..2383953e7e 100644 --- a/docs/source/index.md +++ b/docs/source/index.md @@ -1,15 +1,16 @@ # The Turing Data Safe Haven -```{toctree} +:::{toctree} :hidden: true :maxdepth: 2 overview/index.md design/index.md deployment/index.md +management/index.md roles/index.md contributing/index.md -``` +::: ```{image} _static/scriberia_diagram.jpg :alt: Data Safe Haven cartoon by Scriberia for The Alan Turing Institute @@ -39,6 +40,8 @@ You can read them through in order or simply jump to the section that you are mo - If you want details about the technical design of the Data Safe Haven. - [**Deployment**](deployment/index.md) - If you want to deploy your own Data Safe Haven. +- [**Management**](management/index.md) + - If you want to manage a Data Safe Haven that you (or someone else) has deployed. - [**Roles**](roles/index.md) - Information about the different user roles in the Data Safe Haven. - Instructions and advice for the actions of different user roles. diff --git a/docs/source/management/index.md b/docs/source/management/index.md new file mode 100644 index 0000000000..363e306165 --- /dev/null +++ b/docs/source/management/index.md @@ -0,0 +1,102 @@ +# Management + +## Add users to the Data Safe Haven + +:::{important} +You will need a full name, phone number, email address and country for each user. +::: + +1. You can add users directly in your Entra tenant, following the instructions [here](https://learn.microsoft.com/en-us/entra/fundamentals/how-to-create-delete-users). + +2. Alternatively, you can add multiple users from a CSV file with columns named (`GivenName`, `Surname`, `Phone`, `Email`, `CountryCode`). + - (Optional) you can provide a `Domain` column if you like but this will otherwise default to the domain of your SHM + - {{warning}} **Phone** must be in [E.123 international format](https://en.wikipedia.org/wiki/E.123) + - {{warning}} **CountryCode** is the two letter [ISO 3166-1 Alpha-2](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2#Officially_assigned_code_elements) code for the country where the user is based + +::::{admonition} Example CSV user file +:class: dropdown tip + +:::{code} text +GivenName;Surname;Phone;Email;CountryCode +Sherlock;Holmes;+44800456456;sherlock@holmes.me;GB +John;Watson;+18005550100;john.watson@nhs.uk;GB +::: +:::: + +```{code} shell +$ dsh users add PATH_TO_MY_CSV_FILE +``` + +## Listing available users + +- You can do this from the [Microsoft Entra admin centre](https://entra.microsoft.com/) + + 1. Browse to **{menuselection}`Groups --> All Groups`** + 2. 
Click on the group named **Data Safe Haven SRE _SRE-NAME_ Users** + 3. Browse to **{menuselection}`Manage --> Members`** from the secondary menu on the left side + +- You can do this at the command line by running the following command: + + ```{code} shell + $ dsh users list YOUR_SRE_NAME + ``` + + which will give output like the following + + ``` + ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┓ + ┃ username ┃ Entra ID ┃ SRE YOUR_SRE_NAME ┃ + ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━┩ + │ ada.lovelace │ x │ │ + │ grace.hopper │ x │ │ + │ sherlock.holmes │ x │ x │ + │ john.watson │ x │ x │ + └──────────────────────────────┴──────────┴───────────────────┘ + ``` + +## Assign existing users to an SRE + +1. You can do this directly in your Entra tenant by adding them to the **Data Safe Haven SRE _YOUR\_SRE\_NAME_ Users** group, following the instructions [here](https://learn.microsoft.com/en-us/entra/fundamentals/groups-view-azure-portal#add-a-group-member). + +2. Alternatively, you can add multiple users from the command line: + + ```{code} shell + $ dsh users register YOUR_SRE_NAME -u USERNAME_1 -u USERNAME_2 + ``` + + where you must specify the usernames for each user you want to add to this SRE. + + :::{important} + Do not include the Entra ID domain part of the username, just the part before the @. + ::: + +## Manually register users for self-service password reset + +:::{tip} +Users created via the `dsh users` command line tool will be automatically registered for SSPR. +::: + +If you have manually created a user and want to enable SSPR, do the following + +- Go to the [Microsoft Entra admin centre](https://entra.microsoft.com/) +- Browse to **Users > All Users** from the menu on the left side +- Select the user you want to enable SSPR for +- On the **Manage > Authentication Methods** page fill out their contact info as follows: + - Ensure that you register **both** a phone number and an email address + - **Phone:** add the user's phone number with a space between the country code and the rest of the number (_e.g._ +44 7700900000) + - **Email:** enter the user's email address here + - Click the **{guilabel}`Save`** icon in the top panel + +## Removing a deployed Data Safe Haven + +- Run the following if you want to teardown a deployed SRE: + +```{code} shell +$ dsh sre teardown YOUR_SRE_NAME +``` + +- Run the following if you want to teardown the deployed SHM: + +```{code} shell +$ dsh shm teardown +``` diff --git a/docs/source/overview/index.md b/docs/source/overview/index.md index bc79edfcbf..ef9aca3d90 100644 --- a/docs/source/overview/index.md +++ b/docs/source/overview/index.md @@ -1,6 +1,6 @@ # Overview -```{toctree} +:::{toctree} :hidden: true :maxdepth: 2 @@ -8,7 +8,7 @@ what_is_dsh.md why_use_dsh.md sensitivity_tiers.md using_dsh.md -``` +::: ## Background and concepts diff --git a/docs/source/overview/sensitivity_tiers.md b/docs/source/overview/sensitivity_tiers.md index 26d669bac5..4aef9a32fe 100644 --- a/docs/source/overview/sensitivity_tiers.md +++ b/docs/source/overview/sensitivity_tiers.md @@ -54,7 +54,7 @@ Non-technical restrictions related to information governance procedures may also Non-technical restrictions related to information governance procedures may also be applied according to your organisation's needs. ```{admonition} Organisational networks -At the Turing connections to Tier 2 environments are only permitted from `Organisational` networks managed by the Turing or one of its organisational partners. 
+At the Turing connections to Tier 2 environments are only permitted from **Organisational** networks managed by the Turing or one of its organisational partners. ``` (policy_tier_3)= @@ -68,7 +68,7 @@ At the Turing connections to Tier 2 environments are only permitted from `Organi Non-technical restrictions related to information governance procedures may also be applied according to your organisation's needs. ```{admonition} Restricted networks -At the Turing connections to Tier 3 environments are only permitted from `Restricted` networks that are only accessible by known researchers. +At the Turing connections to Tier 3 environments are only permitted from **Restricted** networks that are only accessible by known researchers. ``` ```{admonition} Physical spaces diff --git a/docs/source/roles/data_provider_representative/index.md b/docs/source/roles/data_provider_representative/index.md index f1d4c4ec45..02f36c9390 100644 --- a/docs/source/roles/data_provider_representative/index.md +++ b/docs/source/roles/data_provider_representative/index.md @@ -2,11 +2,11 @@ # Dataset Provider Representative -```{toctree} +:::{toctree} :hidden: data_ingress.md -``` +::: The Dataset Provider is the organisation that provided the dataset under analysis. The **Dataset Provider Representative** is the contact person chosen by that organisation to liaise with the institution hosting the Data Safe Haven. @@ -20,8 +20,5 @@ It is the duty of the **Dataset Provider Representative** to manage this set of ## Information of interest to data providers -{ref}`Implementation details ` -: An overview of the design and implementation of the Data Safe Haven. Note that this architecture reflects that obtained when following our deployment guide in conjunction with the information governance processes at the Turing. However, each organisation is responsible for verifying the Data Safe Haven is deployed as expected and that the deployed configuration effectively supports their own information governance policies and processes. - {ref}`Turing security configuration ` : Data Safe Haven security configuration used at The Alan Turing Institute. This configuration is presented for information only. The configurations needed by other organisations will depend on their particularly circumstances and their information governance policies and processes. diff --git a/docs/source/roles/index.md b/docs/source/roles/index.md index 110e8e25b2..6a6a5728d7 100644 --- a/docs/source/roles/index.md +++ b/docs/source/roles/index.md @@ -2,7 +2,7 @@ # Roles -```{toctree} +:::{toctree} :hidden: true :glob: :maxdepth: 2 @@ -13,7 +13,7 @@ programme_manager/index.md project_manager/index.md researcher/index.md system_manager/index.md -``` +::: Several aspects of the Data Safe Haven rely on role-based access controls. You will encounter references to these roles at several points in the rest of this documentation. diff --git a/docs/source/roles/investigator/index.md b/docs/source/roles/investigator/index.md index 474457e1e2..acf68ce522 100644 --- a/docs/source/roles/investigator/index.md +++ b/docs/source/roles/investigator/index.md @@ -2,12 +2,12 @@ # Investigator -```{toctree} +:::{toctree} :hidden: data_ingress.md data_egress.md -``` +::: As the research project lead, this individual is responsible for ensuring that project staff comply with the Environment's security policies. Multiple collaborating institutions may have their own lead staff members, and staff members might delegate responsibilities for the SRE to other researchers. 
diff --git a/docs/source/roles/project_manager/data_ingress.md b/docs/source/roles/project_manager/data_ingress.md index 39292386a0..bc5b2a863c 100644 --- a/docs/source/roles/project_manager/data_ingress.md +++ b/docs/source/roles/project_manager/data_ingress.md @@ -28,5 +28,3 @@ If ingress of new data would change the classification of a project, we suggest ``` At the end of this process they should have classified the project into one of the Data Safe Haven security tiers. - -If there are multiple data providers, please see the guidance for {ref}`roles_system_manager_multiple_providers` diff --git a/docs/source/roles/project_manager/index.md b/docs/source/roles/project_manager/index.md index 77c36b56a7..2a7169538b 100644 --- a/docs/source/roles/project_manager/index.md +++ b/docs/source/roles/project_manager/index.md @@ -2,13 +2,13 @@ # Project Manager -```{toctree} +:::{toctree} :hidden: project_lifecycle.md data_ingress.md data_egress.md -``` +::: A staff member with responsibility for running a particular project. This role could be filled by the {ref}`role_programme_manager`, or a different nominated member of staff within the research institution. diff --git a/docs/source/roles/researcher/index.md b/docs/source/roles/researcher/index.md index dee5a26833..5f091e43ca 100644 --- a/docs/source/roles/researcher/index.md +++ b/docs/source/roles/researcher/index.md @@ -2,12 +2,12 @@ # Researcher -```{toctree} +:::{toctree} :hidden: user_guide.md available_software.md -``` +::: A project member, who analyses data to produce results. We reserve the capitalised term **Researcher** for this role in our user model. diff --git a/docs/source/roles/researcher/user_guide.md b/docs/source/roles/researcher/user_guide.md index c57acf6190..7043df5387 100644 --- a/docs/source/roles/researcher/user_guide.md +++ b/docs/source/roles/researcher/user_guide.md @@ -381,7 +381,7 @@ After going through the account setup procedure, you should have access to: - Multifactor authentication ```{tip} -If you aren't sure about any of these then please return to the [**Set up your account**](#set-up-your-account) section above. +If you aren't sure about any of these then please return to the [**Set up your account**](#-set-up-your-account) section above. ``` ### {{house}} Log into the research environment @@ -470,7 +470,7 @@ Once connected to it, you can analyse the sensitive data belonging to your proje ```{caution} We recommend _not_ including special characters in your password as the keyboard layout expected by the login screen may be different from the one you're using. - - if you want to reset your password, follow the steps defined in the [Password and MFA](#password-and-mfa) section above. + - if you want to reset your password, follow the steps defined in the [Password and MFA](#-password-and-mfa) section above. - if you want to continue with special characters in your password, please test that they are being entered correctly by typing them in the username field. ``` @@ -531,7 +531,7 @@ If you need anything that is not already installed, please discuss this with the ```{attention} This secure research desktop SRD is your interface to a single computer running in the cloud. -You may have access to [additional SRDs](#access-additional-srds) so be careful to check which machine you are working in as files and installed packages may not be the same across the machines. 
+You may have access to [additional SRDs](#-access-additional-srds) so be careful to check which machine you are working in as files and installed packages may not be the same across the machines. ``` ### {{musical_keyboard}} Keyboard mapping @@ -545,7 +545,7 @@ You may find that the keyboard mapping on your computer is not the same as the o Click on `Desktop` and `Applications > Settings > Keyboard` to change the layout. ```{tip} -We recommend opening an application that allows text entry (such as `Libre Office Writer` , see [Access applications](#access-applications) below) to check what keys the remote desktop thinks you're typing – especially if you need to use special characters. +We recommend opening an application that allows text entry (such as `Libre Office Writer` , see [Access applications](#-access-applications) below) to check what keys the remote desktop thinks you're typing – especially if you need to use special characters. ``` ### {{unlock}} Access applications @@ -676,7 +676,7 @@ Depending on the type of data you are accessing, different `R` and `python` pack - {ref}`Tier 2 ` (medium security) environments have full mirrors of `PyPI` and `CRAN` available. - {ref}`Tier 3 ` (high security) environments only have pre-authorised packages available. -If you need to use a package that is not on the allowlist see the section on how to [bring software or data into the environment](#bring-in-new-files-to-the-sre) below. +If you need to use a package that is not on the allowlist see the section on how to [bring software or data into the environment](#-bring-in-new-files-to-the-sre) below. (role_researcher_user_guide_shared_storage)= @@ -723,7 +723,7 @@ The `/scratch/` folder should be used for any work-in-progress that isn't ready Although everyone in your group will have **read-and-write access**, you can create your own folders inside `/scratch` and choose your own permissions for them. ```{caution} -You should not use `/scratch/` for long-term storage as it can be reset at any time without warning (e.g. when the VM is restarted). +You should not use `/scratch/` for long-term storage as it can be reset at any time without warning (_e.g._ when the VM is restarted). ``` The contents of `/scratch/` will be **different** on different VMs in your SRE. @@ -792,7 +792,7 @@ The Data Safe Haven SRE is hosted on the Microsoft Azure cloud platform. One of the benefits of having cloud-based infrastructure is that it can be deleted forever when the project is over. Deleting the infrastructure ensures that neither sensitive data nor insights derived from the data or modelling techniques persist. -While working on the project, make sure that every piece of code you think might be useful is stored in a `GitLab` repository within the secure environment. +While working on the project, make sure that every piece of code you think might be useful is stored in a GitLab repository within the secure environment. Any other work should be transferred to the `/shared/` drive so that it is accessible to other TRE users. You can also use the `/backup/` drive to store work that you want to keep safe from accidental deletion. Anything that you think should be considered for **egress** from the environment (eg. images or processed datasets) should be transferred to the shared `/output/` drive.
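For example, a researcher wrapping up their work might push scripts from the shared drive into a project repository on the SRE's internal GitLab. A minimal sketch, assuming such a repository has already been created there (the GitLab hostname and project path are hypothetical placeholders; use the address of the GitLab instance inside your own SRE):

```{code} shell
# Clone the (hypothetical) project repository from the SRE's internal GitLab
$ git clone http://gitlab.example.sre/researchers/my-analysis.git
# Copy work-in-progress scripts from the shared drive into the repository
$ cp /shared/my-analysis/*.py my-analysis/
$ cd my-analysis
# Commit and push so the code is visible to the rest of your project team
$ git add .
$ git commit -m "Add analysis scripts ahead of project close-down"
$ git push
```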
@@ -837,7 +837,7 @@ We recommend that you make your repositories public to facilitate collaboration If you have not used GitLab before: -- There is a small tutorial available as an [Appendix](#appendix-b-gitlab-tutorial-notes) to this user guide. +- There is a small tutorial available as an [Appendix](#-appendix-b-gitlab-tutorial-notes) to this user guide. - You can find the official documentation on the [GitLab website](https://docs.gitlab.com/ee/user/index.html). - Ask your team mates for help. - Ask the designated contact for your SRE. @@ -1282,7 +1282,7 @@ A comparison in terms of features can be found [here](https://usersnap.com/blog/ ### Getting started with Git If you have never used `git` before, you might want to take a look at an introductory guide. -There are multiple `git` cheat sheets such as[this one from the JIRA authors](https://www.atlassian.com/git/tutorials/atlassian-git-cheatsheet) and [this interactive one](https://ndpsoftware.com/git-cheatsheet.html) and . +There are multiple `git` cheat sheets such as[this one from the JIRA authors](https://www.atlassian.com/git/tutorials/atlassian-git-cheatsheet). ### Repositories diff --git a/docs/source/roles/system_manager/administrator_guide/backup_instances_blobs.png b/docs/source/roles/system_manager/administrator_guide/backup_instances_blobs.png deleted file mode 100644 index d5cd278967..0000000000 Binary files a/docs/source/roles/system_manager/administrator_guide/backup_instances_blobs.png and /dev/null differ diff --git a/docs/source/roles/system_manager/administrator_guide/backup_instances_disks.png b/docs/source/roles/system_manager/administrator_guide/backup_instances_disks.png deleted file mode 100644 index f760656760..0000000000 Binary files a/docs/source/roles/system_manager/administrator_guide/backup_instances_disks.png and /dev/null differ diff --git a/docs/source/roles/system_manager/administrator_guide/backup_progress_disk_1.png b/docs/source/roles/system_manager/administrator_guide/backup_progress_disk_1.png deleted file mode 100644 index 72709cc748..0000000000 Binary files a/docs/source/roles/system_manager/administrator_guide/backup_progress_disk_1.png and /dev/null differ diff --git a/docs/source/roles/system_manager/administrator_guide/backup_progress_disk_2.png b/docs/source/roles/system_manager/administrator_guide/backup_progress_disk_2.png deleted file mode 100644 index a62bad7eb5..0000000000 Binary files a/docs/source/roles/system_manager/administrator_guide/backup_progress_disk_2.png and /dev/null differ diff --git a/docs/source/roles/system_manager/administrator_guide/backup_progress_disk_3.png b/docs/source/roles/system_manager/administrator_guide/backup_progress_disk_3.png deleted file mode 100644 index 5dc3c3841b..0000000000 Binary files a/docs/source/roles/system_manager/administrator_guide/backup_progress_disk_3.png and /dev/null differ diff --git a/docs/source/roles/system_manager/administrator_guide/backup_restore_disk.png b/docs/source/roles/system_manager/administrator_guide/backup_restore_disk.png deleted file mode 100644 index af034908b9..0000000000 Binary files a/docs/source/roles/system_manager/administrator_guide/backup_restore_disk.png and /dev/null differ diff --git a/docs/source/roles/system_manager/administrator_guide/backup_select_containers_validate_blobs.png b/docs/source/roles/system_manager/administrator_guide/backup_select_containers_validate_blobs.png deleted file mode 100644 index 45fa43c763..0000000000 Binary files 
a/docs/source/roles/system_manager/administrator_guide/backup_select_containers_validate_blobs.png and /dev/null differ diff --git a/docs/source/roles/system_manager/administrator_guide/backup_select_restore_time_blobs.png b/docs/source/roles/system_manager/administrator_guide/backup_select_restore_time_blobs.png deleted file mode 100644 index 68962558b8..0000000000 Binary files a/docs/source/roles/system_manager/administrator_guide/backup_select_restore_time_blobs.png and /dev/null differ diff --git a/docs/source/roles/system_manager/administrator_guide/backup_select_snapshot_validate_disks.png b/docs/source/roles/system_manager/administrator_guide/backup_select_snapshot_validate_disks.png deleted file mode 100644 index 2a9359846a..0000000000 Binary files a/docs/source/roles/system_manager/administrator_guide/backup_select_snapshot_validate_disks.png and /dev/null differ diff --git a/docs/source/roles/system_manager/administrator_guide/backup_swap_disk_after.png b/docs/source/roles/system_manager/administrator_guide/backup_swap_disk_after.png deleted file mode 100644 index 56ab56270d..0000000000 Binary files a/docs/source/roles/system_manager/administrator_guide/backup_swap_disk_after.png and /dev/null differ diff --git a/docs/source/roles/system_manager/administrator_guide/backup_swap_disk_before.png b/docs/source/roles/system_manager/administrator_guide/backup_swap_disk_before.png deleted file mode 100644 index 373bf1bf57..0000000000 Binary files a/docs/source/roles/system_manager/administrator_guide/backup_swap_disk_before.png and /dev/null differ diff --git a/docs/source/roles/system_manager/administrator_guide/connect_azure_storage.png b/docs/source/roles/system_manager/administrator_guide/connect_azure_storage.png deleted file mode 100644 index c2fc011eec..0000000000 Binary files a/docs/source/roles/system_manager/administrator_guide/connect_azure_storage.png and /dev/null differ diff --git a/docs/source/roles/system_manager/administrator_guide/internal_mirror_packages.png b/docs/source/roles/system_manager/administrator_guide/internal_mirror_packages.png deleted file mode 100644 index 5fffefdb7a..0000000000 Binary files a/docs/source/roles/system_manager/administrator_guide/internal_mirror_packages.png and /dev/null differ diff --git a/docs/source/roles/system_manager/administrator_guide/login_certificate_expiry.png b/docs/source/roles/system_manager/administrator_guide/login_certificate_expiry.png deleted file mode 100644 index e20a1e7594..0000000000 Binary files a/docs/source/roles/system_manager/administrator_guide/login_certificate_expiry.png and /dev/null differ diff --git a/docs/source/roles/system_manager/administrator_guide/login_password_login.png b/docs/source/roles/system_manager/administrator_guide/login_password_login.png deleted file mode 100644 index 1a76478d00..0000000000 Binary files a/docs/source/roles/system_manager/administrator_guide/login_password_login.png and /dev/null differ diff --git a/docs/source/roles/system_manager/administrator_guide/no_recent_connections.png b/docs/source/roles/system_manager/administrator_guide/no_recent_connections.png deleted file mode 100644 index 5b333e69c9..0000000000 Binary files a/docs/source/roles/system_manager/administrator_guide/no_recent_connections.png and /dev/null differ diff --git a/docs/source/roles/system_manager/administrator_guide/password_reset_failure.png b/docs/source/roles/system_manager/administrator_guide/password_reset_failure.png deleted file mode 100644 index 4598d138cd..0000000000 Binary files 
a/docs/source/roles/system_manager/administrator_guide/password_reset_failure.png and /dev/null differ diff --git a/docs/source/roles/system_manager/administrator_guide/read_only_sas_token.png b/docs/source/roles/system_manager/administrator_guide/read_only_sas_token.png deleted file mode 100644 index 8778c7e9ca..0000000000 Binary files a/docs/source/roles/system_manager/administrator_guide/read_only_sas_token.png and /dev/null differ diff --git a/docs/source/roles/system_manager/administrator_guide/srd_login_failure.png b/docs/source/roles/system_manager/administrator_guide/srd_login_failure.png deleted file mode 100644 index b8b65973d9..0000000000 Binary files a/docs/source/roles/system_manager/administrator_guide/srd_login_failure.png and /dev/null differ diff --git a/docs/source/roles/system_manager/administrator_guide/srd_login_prompt.png b/docs/source/roles/system_manager/administrator_guide/srd_login_prompt.png deleted file mode 100644 index dda78cb4c7..0000000000 Binary files a/docs/source/roles/system_manager/administrator_guide/srd_login_prompt.png and /dev/null differ diff --git a/docs/source/roles/system_manager/index.md b/docs/source/roles/system_manager/index.md index 8bf48dcd4b..1e0c67de52 100644 --- a/docs/source/roles/system_manager/index.md +++ b/docs/source/roles/system_manager/index.md @@ -2,34 +2,10 @@ # System Manager -```{toctree} +:::{toctree} :hidden: -manage_users.md -manage_data.md -manage_costs.md -migrate_an_shm.md -manage_webapps.md -manage_deployments.md -``` +::: Members of technical staff responsible for configuration and maintenance of the Safe Haven. Typically these might be members of an institutional IT team. - -[Managing users](manage_users.md) -: How to manage users for a Data Safe Haven as an administrator. - -[Managing data](manage_data.md) -: How to manage data for a Data Safe Haven as an administrator. - -[Managing costs](manage_costs.md) -: How to manage Azure costs for a Data Safe Haven as an administrator. - -[Migrating an existing SHM](migrate_an_shm.md) -: How to migrate the contents of a previously-deployed SHM to a new one (advanced). - -[Managing web applications](manage_webapps.md) -: How to access virtual machines hosting web applications - -[Managing Data Safe Haven deployments](manage_deployments.md) -: Managing, altering and removing an SRE or a complete deployment as a Data Safe Haven administrator. diff --git a/docs/source/roles/system_manager/manage_costs.md b/docs/source/roles/system_manager/manage_costs.md deleted file mode 100644 index 088ecb5eac..0000000000 --- a/docs/source/roles/system_manager/manage_costs.md +++ /dev/null @@ -1,61 +0,0 @@ -(administrator_manage_costs)= - -# Managing costs - -```{important} -This document assumes that you already have access to a {ref}`Safe Haven Management (SHM) environment ` and one or more {ref}`Secure Research Environments (SREs) ` that are linked to it. -``` - -When and SHM and/or SRE is not being used, it can be cost-efficient to shut it down in order to save on some of the ongoing running costs. - -## {{point_down}} Shut down an SHM or SRE - -Sometimes you may want to temporarily shut down an SHM or SRE, rather than tearing it down entirely. -You can do that with these scripts: - -On your **deployment machine**. 
- -- Ensure you have the same version of the Data Safe Haven repository as was used by your deployment team -- Open a `Powershell` terminal and navigate to the `deployment/administration` directory within the Data Safe Haven repository -- Then do one or both of the following: - -````{admonition} Shut down SHM -```powershell -PS> ./SHM_Manage_VMs.ps1 -shmId -Action EnsureStopped -Group All -``` -```` - -````{admonition} Shut down SRE -```powershell -./SRE_Manage_VMs.ps1 -shmId -sreId -Action EnsureStopped -``` -```` - -## {{boot}} Start up an SHM or SRE - -If you need to reboot an SHM or SRE that is not running, you can use the same scripts you used to shut them down, but changing the `-Action` flag to `EnsureStopped`, see below. - -On your **deployment machine**. - -- Ensure you have the same version of the Data Safe Haven repository as was used by your deployment team -- Open a `Powershell` terminal and navigate to the `deployment/administration` directory within the Data Safe Haven repository -- Run `./SHM_Manage_VMs.ps1 -shmId -Action EnsureStarted -Group All` to restart the SHM -- For each SRE, run `./SRE_Manage_VMs.ps1 -shmId -sreId -Action EnsureStarted` - -```{warning} -If the Azure subscription that you have deployed into runs out of credit, the SHM and/or SRE will be shutdown automatically. -``` - -## {{anger}} Tear down SHM package mirrors - -During normal usage of the SHM, you should not need to tear down the package mirrors. -However, if you no longer have any SREs at a particular tier and you want to save on the costs of running the mirrors, you might decide to do so. - -On your **deployment machine**. - -- Ensure you have the same version of the Data Safe Haven repository as was used by your deployment team. -- Open a `Powershell` terminal and navigate to the `deployment/administration` directory within the Data Safe Haven repository. -- Ensure you are logged into `Azure` within `Powershell` using the command: `Connect-AzAccount`. This command will give you a URL and a short alphanumeric code. You will need to visit that URL in a web browser and enter the code - - NB. If your account is a guest in additional Azure tenants, you may need to add the `-Tenant ` flag, where `` is the ID of the Azure tenant you want to deploy into. -- Tear down the package mirrors by running `./SHM_Package_Repository_Teardown.ps1 -shmId -tier `, where `` is the {ref}`management environment ID ` specified in the configuration file. -- This will take **a few minutes** to run. diff --git a/docs/source/roles/system_manager/manage_data.md b/docs/source/roles/system_manager/manage_data.md deleted file mode 100644 index ce3f73787a..0000000000 --- a/docs/source/roles/system_manager/manage_data.md +++ /dev/null @@ -1,271 +0,0 @@ -(administrator_manage_data)= - -# Managing data ingress and egress - -```{important} -This document assumes that you already have access to a {ref}`Safe Haven Management (SHM) environment ` and one or more {ref}`Secure Research Environments (SREs) ` that are linked to it. -``` - -(roles_system_manager_data_ingress)= - -## Data Ingress - -It is the data provider's responsibility to upload the data required by the safe haven. - -```{important} -Any data ingress must be signed off by the {ref}`role_data_provider_representative`, {ref}`role_investigator` and referee (if applicable). 
-``` - -The following steps show how to generate a temporary write-only upload token that can be securely sent to the data provider, enabling them to upload the data: - -- In the Azure portal select `Subscriptions` then navigate to the subscription containing the relevant SHM -- Search for the resource group: `RG_SHM__PERSISTENT_DATA`, then click through to the storage account called: `data` (where `` is a random string) -- Click `Networking` under `Settings` and paste the data provider's IP address as one of those allowed under the `Firewall` header, then hit the save icon in the top left -- From the `Overview` tab, click the link to `Containers` (in the middle of the page) -- Click `ingress` -- Click `Shared access tokens` under `Settings` and do the following: - - Under `Permissions`, check these boxes: - - `Write` - - `List` - - Set a 24 hour time window in the `Start and expiry date/time` (or an appropriate length of time) - - Leave everything else as default and click `Generate SAS token and URL` - - Copy the `Blob SAS URL` -- Send the `Blob SAS URL` to the data provider via secure email (for example, you could use the [Egress secure email](https://www.egress.com/) service) -- The data provider should now be able to upload data -- You can validate successful data ingress by logging into the SRD for the SRE and checking the `/data` volume, where you should be able to view the data that the data provider has uploaded - -(roles_system_manager_multiple_providers)= - -## Multiple data providers - -In some projects, there may be more than one data provider responsible for uploading data. Two potential issues that may occur are _name clashes_ and _data leakage_. - -If all data providers are uploading to the same storage container, then name clashes may occur. There is no protection against overwriting files during upload. -Thus, if more than one data provider uploads files with the same path, then the earlier upload will be overwritten. -This can be avoided by providing each data provider with their own subfolder on the storage container and ensuring that each uploads only to their subfolder. - -If all data providers are uploading to the same storage container, then they may be able to see the files uploaded by other data providers. -Although they will not be able to access or download these files, a potential issue is that sensitive information may be visible in either the file names or directory structure of the uploaded data. - -If possible, data providers should avoid using any identifying information in the filenames or directory structure of the data that they upload. -This is not always possible, since some data providers may require identifying information to be part of filenames or directory structures. - -An alternative is to provide separate storage containers for upload for each data provider. -These containers should have all the same access restrictions as used for a single ingress storage container. - -After the data has been uploaded, the {ref}`role_system_manager` can transfer the uploaded data to a single storage container accessible to {ref}`role_researcher`s from the relevant SRE, as per the normal data ingress process. -The data-provider-specific containers should be deleted once the data has been transferred. - -(roles_system_manager_software_ingress)= - -## Software Ingress - -Software ingress is performed in a similar manner to data. 
- -```{important} -Software ingress must go through the same approval process as is the case for data ingress, including sign-off from the {ref}`role_data_provider_representative`, {ref}`role_investigator` and referee (if applicable). -``` - -- Follow the same steps as for {ref}`data ingress ` above to provide temporary write access, but set the time window for the SAS token to a shorter period (e.g. several hours) -- Share the token with the {ref}`role_investigator`, so they can install software within the time window -- The {ref}`role_investigator` can perform software ingress via `Azure Storage Explorer` (for instance as a zip file) - -(roles_system_manager_data_egress)= - -## Data egress - -```{important} -Any data egress must be signed off by the {ref}`role_data_provider_representative`, {ref}`role_investigator` and referee (if applicable). -``` - -```{important} -Assessment of output must be completed **before** an egress link is created. -``` - -The {ref}`role_system_manager` creates a time-limited and IP restricted link to remove data from the environment, after the outputs have been classified and approved for release. - -- In the Azure portal select `Subscriptions` then navigate to the subscription containing the relevant SHM -- Search for the resource group: `RG_SHM__PERSISTENT_DATA`, then click through to the storage account called: `data` (where `` is a random string) -- Click `Networking` under `Settings` to check the list of pre-approved IP addresses allowed under the `Firewall` header - - Ensure that the IP address of the person to receive the outputs is listed and enter it if not -- Click `Containers` under `Data storage` -- Click `egress` -- Click `Shared access tokens` under `Settings` and do the following: - - Under `Permissions`, check these boxes: - - `Read` - - `List` - - Set a time window in the `Start and expiry date/time` that gives enough time for the person who will perform the secure egress download to do so - - Leave everything else as default click `Generate SAS token and URL` - - ```{image} administrator_guide/read_only_sas_token.png - :alt: Read-only SAS token - :align: center - ``` - - - Leave this portal window open and move to the next step -- The appropriate person should now be able to download data - -### The output volume - -Once you have set up the egress connection in `Azure Storage Explorer`, you should be able to view data from the **output volume**, a read-write area intended for the extraction of results, such as figures for publication. -On the SRD, this volume is `/output` and is shared between all SRDs in an SRE. -For more info on shared SRE storage volumes, consult the {ref}`Safe Haven User Guide `. - -## {{file_cabinet}} Backup - -### {{card_file_box}} Restoring blobs - -Blob containers in backed up storage accounts are protected by [operational backup](https://learn.microsoft.com/en-us/azure/backup/blob-backup-overview#how-the-operational-backup-works). -It is possible to restore the state of the blobs to an earlier point in time, up to twelve weeks in the past. - -The blob containers covered by the protection for each SRE are the - -- ingress container (mounted at `/data`) -- egress container (mounted at `/output`) -- backup container (mounted at `/backup`) - -To restore these containers to a previous point in time: - -```{important} -Blobs are restored 'in place'. -The current state will be overwritten by the point which you restore to. 
-``` - -- In the Azure portal select `Subscriptions` then navigate to the subscription containing the relevant SRE -- Search for the resource group: `RG_SHM__SRE__BACKUP`, then click on the storage account called: `bv--sre-` -- Click `Backup instances` under `Manage` in the left-hand menu -- Ensure that the `Datasource type` filter is set to `Azure Blobs (Azure Storage)` - - ```{image} administrator_guide/backup_instances_blobs.png - :alt: Selecting blob backup instances - :align: center - ``` - -- Click on the storage-account backup instance -- Select a point in the past to restore to and click `Restore` - - ```{image} administrator_guide/backup_select_restore_time_blobs.png - :alt: Selecting blob backup restore point - :align: center - ``` - -- Click on `Next: Restore Parameters` -- You can now choose whether to restore all, or a subset of the containers. In the example below the 'egress' and 'backup' containers are selected -- Click on `Validate` - - ```{image} administrator_guide/backup_select_containers_validate_blobs.png - :alt: Selecting blob containers to restore and validating - :align: center - ``` - -- Click on `Next: Review + restore` -- Click on `Restore` - -### {{optical_disk}} Restoring disks - -Backed up disks have incremental snapshots taken daily. -These snapshots are stored in the backup resource group,`RG_SHM__SRE__BACKUP`. - -The disks covered by the protection for each SRE are the - -- GitLab data disk -- CodiMD data disk -- PostgreSQL data disk -- MSSQL data disk - -To restore a disk: - -```{important} -Restoring a disk creates a new disk object from the incremental snapshots. -You will need to specify where to create the disk and its name. -You will also need to attach the disk to any virtual machines which should use -it and enroll the new disk into the backup system. -``` - -- In the Azure portal select `Subscriptions` then navigate to the subscription containing the relevant SRE -- Search for the resource group: `RG_SHM__SRE__BACKUP`, then click on the storage account called: `bv--sre- -- Click `Backup instances` under `Manage` in the left-hand menu -- Ensure that the `Datasource type` filter is set to `Azure Disks` - - ```{image} administrator_guide/backup_instances_disks.png - :alt: Selecting disk backup instances - :align: center - ``` - -- Click on the disk to restore -- Click `Restore` -- Click `Select restore point` to choose which snapshot to revert to and click `Select`. By default only snapshots from the last 30 days are displayed but this can be adjusted -- Click `Next: Restore Parameters` -- Enter the subscription and resource group in which to create the new disk; these should match the original disk -- Enter a name for the new disk and click `Validate` - - ```{image} administrator_guide/backup_select_snapshot_validate_disks.png - :alt: Configuring and validating disk backup - :align: center - ``` - -- Click on `Next: Review + restore` -- Click on `Restore` -- Wait for the restoration to finish. 
You can monitor the progress on the backup instance page on the Azure portal - - ```{image} administrator_guide/backup_progress_disk_1.png - :align: center - ``` - - ```{image} administrator_guide/backup_progress_disk_2.png - :align: center - ``` - - ```{image} administrator_guide/backup_progress_disk_3.png - :align: center - ``` - -- Navigate to the resource group where the new disk has been created -- Select the virtual machine that the old disk is attached to and click `Disks` in the left-hand menu -- Take note of the old disks `LUN` -- Remove the old disk by clicking the 'X' at the right-hand side of the disk table -- Click `Save` -- Click `Attach existing disks` and select the disk you restored -- Ensure the restored disk has the same 'LUN' as the old disk -- Click `Save` - - ```{image} administrator_guide/backup_swap_disk_before.png - :alt: The state before swapping in the restored disk - :align: center - ``` - - ```{image} administrator_guide/backup_swap_disk_after.png - :alt: The state after swapping in the restored disk - :align: center - ``` - -- Restart the virtual machine - -### {{optical_disk}} Enrolling restored disks for backup - -On your **deployment machine**. - -- Ensure you have the same version of the Data Safe Haven repository as was used by your deployment team -- Open a `Powershell` terminal and navigate to the `deployment/administration` directory within the Data Safe Haven repository -- Ensure you are logged into `Azure` within `Powershell` using the command: `Connect-AzAccount`. This command will give you a URL and a short alphanumeric code. You will need to visit that URL in a web browser and enter the code -- NB. If your account is a guest in additional Azure tenants, you may need to add the `-Tenant ` flag, where `` is the ID of the Azure tenant you want to deploy into -- Note the name of the restored disk and the name of the resource group it belongs to -- Run the following script subsituting and with the names of the resource group and disk respectively: - - ```powershell - ./SRE_Enroll_Disk_Backup.ps1 -shmId -sreId -resourceGroup - -diskName - ``` - -## {{package}} Updating allowed repository packages - -For a {ref}`policy_tier_3` SRE, only the packages named in the allowlists at `environment_configs/package_lists/` can be installed by users. - -To update the allowlists on an SHM, you should use the `SHM_Package_Repository_Update_Allowlists.ps1` script. - -```powershell -PS> /deployment/administration/SHM_Package_Repository_Update_Allowlists.ps1 -shmId -``` - -By default, this script will use the allowlists present in `environment_configs/package_lists/` but you may use the `-allowlistDirectory` option to specify another directory containing the allowlists. -It is assumed that the allowlists will have the same names as those in in `environment_configs/package_lists/`. diff --git a/docs/source/roles/system_manager/manage_deployments.md b/docs/source/roles/system_manager/manage_deployments.md deleted file mode 100644 index ee35644158..0000000000 --- a/docs/source/roles/system_manager/manage_deployments.md +++ /dev/null @@ -1,246 +0,0 @@ -(administrator_manage_deployments)= - -# Managing Data Safe Haven deployments - -```{important} -This document assumes that you already have access to a {ref}`Safe Haven Management (SHM) environment ` and one or more {ref}`Secure Research Environments (SREs) ` that are linked to it. 
-``` - -```{include} ../../deployment/snippets/00_symbols.partial.md -:relative-images: -``` - -(resize_nfs)= - -## {{file_cabinet}} Resizing User Storage - -The directories for user data, `/home` and `/shared`, are NFS shares mounted to each SRD. -These shares have a declared, finite size. -It is therefore possible for this space to become entirely consumed. - -In the case of the `/home` directory, this will prevent normal users from being able to login to the SRD as they will be unable to write to their own home directories. -This can be fixed by increasing the size of the `/home` NFS share. - -![Portal: 5 minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-azure&label=portal&color=blue&message=five%20minutes) - -- Navigate to the SRE's storage resource group `RG_SHM__SRE__Storage` -- Navigate to the user data storage account `userdata` -- On the left-hand navigation bar, select "File shares" under the "Data storage" heading -- You will see the file shares, "home" and "shared". -- Click on the ellipsis on the same row as the share you would like to adjust, at the far right of the page, and in the pop-up menu click "Size and performance". -- A form will appear where you can enter the share size in GiB. - Enter the new size and click the "Save" button. - -(renew_ssl)= - -## {{alarm_clock}} Renewing SRE Domain Certificates - -The remote desktop frontend of an SRE will need to periodically have its SSL certificate renewed so that it can be accessed via HTTPS. -After each 90 day period that the SRE is live, re-run the script to update the certificate. - -![Powershell: five minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=ten%20minutes) at {{file_folder}} `./deployment/secure_research_environment/setup` - -```powershell -PS> ./Update_SRE_SSL_Certificate.ps1 -shmId -sreId -``` - -- where `` is the {ref}`management environment ID ` for this SHM -- where `` is the {ref}`secure research environment ID ` for this SRE - -(renew_sas)= - -## {{locked_with_key}} Renew SRE Container Access Policies - -The [SRE storage containers](role_researcher_user_guide_shared_storage) for input data, backup and output are all provided by blob storage. -The SRDs use [SAS tokens](https://learn.microsoft.com/en-us/azure/storage/common/storage-sas-overview) bound to a [Stored Access Policy](https://learn.microsoft.com/en-us/azure/storage/common/storage-stored-access-policy-define-dotnet) to authenticate and access the data. - -When the containers are deployed the Stored Access Policy is valid for one year. -If a SRE is deployed for longer than this, the policy will need to be renewed in order to maintain access to these containers. - -![Powershell: five minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=five%20minutes) at {{file_folder}} `./deployment/secure_research_environment/setup` - -```powershell -PS> ./Update_Stored_Access_Policies.ps1 -shmId -sreId -``` - -(resize_vm)= - -## {{arrow_upper_right}} Resize the Virtual Machine (VM) of a Secure Research Desktop (SRD) - -Sometimes during a project that uses a deployed SRE, researchers may find the available compute inadequate for their purposes and wish to increase the size of the SRD's VM. The **simplest way to resize a VM is via the Azure Portal**, but it can also be done via script. 
- -To resize via the Azure Portal: - -- Log into the Azure portal and locate the VM inside the Resource Group called `RG_SHM__SRE__COMPUTE` -- [Follow these instructions](https://learn.microsoft.com/en-us/azure/virtual-machines/resize-vm?tabs=portal) in the Azure portal - -
- -To resize via script: - - -- Log into the Azure portal and locate the VM inside the Resource Group called `RG_SHM__SRE__COMPUTE` -- Make a note of the last octet of the IP address - -![Powershell: ten minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=ten%20minutes) at {{file_folder}} `./deployment/secure_research_environment/setup` - -```powershell -PS> ./Add_Single_SRD.ps1 -shmId -sreId -ipLastOctet [-vmSize ] -Upgrade -Force -``` - -- where `` is the {ref}`management environment ID ` for this SHM -- where `` is the {ref}`secure research environment ID ` for this SRE -- where `` is last octet of the IP address (check what this is in the Azure Portal) -- where `` is the new [Azure VM size](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes) -- where `` is required to ensure the old VM is replaced -- where `` ensures that `` works even when the VM is built with the same image - -
- -```{tip} -If the new `VM size` you want isn't shown as available in the Azure Portal, there are several steps that can be taken. - -Firstly, try **stopping the VM** and checking again whether the size you want is available, as this can reveal additional options that aren't shown whilst the VM is running. For example, when resizing to an N-series VM in Azure, (see {ref}`using_gpus`) we've found that NVIDIA options such as the NVv3-series are not always shown as available. - -Next, you can try to **request an increase** in the vCPU quota for the VM family of the desired VM: -- Navigate to the Azure Portal and on the subscription page, click `Usage + quotas` under `Settings` -- Choose the family appropriate to the VM that you want to resize to, and select a region appropriate for the SRE -- Click the pen icon and set the `New Limit` to at least the number of vCPUs required by the VM that you want, the click submit -- After the request is accepted, resize the VM as above -- In some cases, the quota increase may require a request to be submitted to Microsoft -``` - -(add_new_srd)= - -## {{heavy_plus_sign}} Add a new SRD - -The `-VmSizes` parameter provided when deploying the SRE (with the `Deploy_SRE.ps1` script) determines how many SRDs are created and how large each one will be. - -To deploy a new SRD into the SRE environment, follow the below instructions: - -![Powershell: ten minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=ten%20minutes) at {{file_folder}} `./deployment/secure_research_environment/setup` - -```powershell -PS> ./Add_Single_SRD.ps1 -shmId -sreId -ipLastOctet [-vmSize ] -``` - -- where `` is the {ref}`management environment ID ` for this SHM -- where `` is the {ref}`secure research environment ID ` for this SRE -- where `` is last octet of the IP address (this must be different to any other SRD VMs) - -(using_gpus)= - -## {{minidisc}} Using GPUs in SRDs - -When you {ref}`resize_vm` or {ref}`add_new_srd` featuring a GPU (N-series in Azure), you'll need to ensure it has an Nvidia GPU (as opposed to AMD or other). -See the [Azure docs](https://learn.microsoft.com/en-us/azure/virtual-machines/sizes-gpu) for more information. -This is because only Nvidia GPUs support the drivers and CUDA libraries installed on the SRD image. - -To test that a GPU enabled VM is working as expected, log into the SRE and type `nvidia-smi` into the terminal. - -## {{crown}} Performing operations that require superuser privileges - -If you need to perform any operations in the SRE that require root access, you will need to log into the `compute` VM via the Serial Console in the Azure Portal. - -```{include} snippets/01_console.partial.md -:relative-images: -``` - -## {{fire}} Remove a single SRE - -In order to tear down an SRE, use the following procedure: - -On your **deployment machine**. - -- Ensure you have the same version of the Data Safe Haven repository as was used by your deployment team -- Open a `Powershell` terminal and navigate to the `deployment/administration` directory within the Data Safe Haven repository -- Ensure you are logged into `Azure` within `Powershell` using the command: `Connect-AzAccount`. This command will give you a URL and a short alphanumeric code. You will need to visit that URL in a web browser and enter the code -- NB. If your account is a guest in additional Azure tenants, you may need to add the `-Tenant ` flag, where `` is the ID of the Azure tenant you want to deploy into. 
-- Run the following script: - - ```powershell - ./SRE_Teardown.ps1 -shmId -sreId - ``` - -- If you provide the optional `-dryRun` parameter then the names of all affected resources will be printed, but nothing will be deleted - -The `SRE_Teardown.ps1` script *does not* remove the SRE data storage account stored in the SHM. Thus, the `ingress`, `egress`, and `backup` data folders still exist. -This allows the data to be used for the project associated with the SRE to exist before and after the project starts and ends, without requiring the full SRE to be running. - -```{attention} -To avoid accidental deletion, the storage account must be deleted manually through the Azure Portal. -The storage account can be found under `RG_SHM__PERSISTENT_DATA`, with a name similar to `data`. -Deleting the SRE storage account from `RG_SHM__PERSISTENT_DATA` will delete any work that was done in the SRE. -``` - -## {{end}} Remove a complete Safe Haven - -### {{collision}} Tear down any attached SREs - -On your **deployment machine**. - -- Ensure you have the same version of the Data Safe Haven repository as was used by your deployment team -- Open a `Powershell` terminal and navigate to the `deployment/administration` directory within the Data Safe Haven repository -- Ensure you are logged into `Azure` within `Powershell` using the command: `Connect-AzAccount`. This command will give you a URL and a short alphanumeric code. You will need to visit that URL in a web browser and enter the code - - ```{attention} - If your account is a guest in additional Azure tenants, you may need to add the `-Tenant ` flag, where `` is the ID of the Azure tenant you want to deploy into. - ``` - -- For each SRE attached to the SHM, do the following: - - Tear down the SRE by running: - - ```powershell - ./SRE_Teardown.ps1 -sreId - ``` - - where the SRE ID is the one specified in the relevant config file - - ```{note} - If you provide the optional `-dryRun` parameter then the names of all affected resources will be printed, but nothing will be deleted - ``` - -The `SRE_Teardown.ps1` script *does not* remove the SRE data storage account stored in the SHM. Thus, the `ingress`, `egress`, and `backup` data folders still exist. -This allows the data to be used for the project associated with the SRE to exist before and after the project starts and ends, without requiring the full SRE to be running. - -```{attention} -To avoid accidental deletion, the storage account must be deleted manually through the Azure Portal. -The storage account can be found under `RG_SHM__PERSISTENT_DATA`, with a name similar to `data`. -Deleting the SRE storage account from `RG_SHM__PERSISTENT_DATA` will delete any work that was done in the SRE. -``` - -### {{unlock}} Disconnect from the Microsoft Entra ID - -Connect to the **SHM Domain Controller (DC1)** via Remote Desktop Client over the SHM VPN connection - -- Log in as a **domain** user (ie. `@`) using the username and password obtained from the Azure portal -- If you see a warning dialog that the certificate cannot be verified as root, accept this and continue -- Open Powershell as an administrator - - Navigate to `C:\Installation` - - Run `.\Disconnect_AD.ps1` - - You will need to provide login credentials (including MFA if set up) for `@` - -```{attention} -Full disconnection of the Microsoft Entra ID can take up to 72 hours but is typically less. 
-If you are planning to install a new SHM connected to the same Microsoft Entra ID you may find the `AzureADConnect` installation step requires you to wait for the previous disconnection to complete. -``` - -### {{bomb}} Tear down the SHM - -On your **deployment machine**. - -- Ensure you have the same version of the Data Safe Haven repository as was used by your deployment team -- Open a `Powershell` terminal and navigate to the `deployment/administration` directory within the Data Safe Haven repository -- Ensure you are logged into `Azure` within `Powershell` using the command: `Connect-AzAccount`. This command will give you a URL and a short alphanumeric code. You will need to visit that URL in a web browser and enter the code - - ```{attention} - If your account is a guest in additional Azure tenants, you may need to add the `-Tenant ` flag, where `` is the ID of the Azure tenant you want to deploy into. - ``` - -- Tear down the SHM by running: - - ```powershell - ./SHM_Teardown.ps1 -shmId - ``` - - where `` is the {ref}`management environment ID ` specified in the configuration file. diff --git a/docs/source/roles/system_manager/manage_users.md b/docs/source/roles/system_manager/manage_users.md deleted file mode 100644 index 2155baa3f4..0000000000 --- a/docs/source/roles/system_manager/manage_users.md +++ /dev/null @@ -1,350 +0,0 @@ -(administrator_manage_users)= - -# Managing Data Safe Haven users - -```{important} -This document assumes that you already have access to a {ref}`Safe Haven Management (SHM) environment ` and one or more {ref}`Secure Research Environments (SREs) ` that are linked to it. -``` - -(create_new_users)= - -## {{beginner}} Create new users - -Users should be created on the main domain controller (DC1) in the SHM and synchronised to Microsoft Entra ID. -A helper script for doing this is already uploaded to the domain controller - you will need to prepare a `CSV` file in the appropriate format for it. - -(security_groups)= - -### {{lock}} SRE Security Groups - -Each user should be assigned to one or more Active Directory "security groups". - -- `SG Research Users`: Almost all researchers should be in this group. No special permissions. Allows users to log in to ``. -- `SG Data Administrators`: Researchers who can create/modify/delete tables in the `data` schema on databases within ``. `SG Research Users` can only read these tables. Restricting this access prevents most users from creating/deleting arbitrary tables, which is important because some SREs have their input data in database form. -- `SG System Administrators`: Researchers who have full superuser privileges on databases within ``. Users in this group have full access to the databases and can manipulate them in any way they choose. System Administrators also have administration privileges on the [Guacamole Remote Desktop](https://guacamole.apache.org/doc/gug/administration.html). - -Typically, users with either of the latter two roles should also have the `Research Users` role to allow them to log in to the SRDs within the SRE. - -(generate_user_csv)= - -## {{scroll}} Generate user details CSV file - -On the **SHM domain controller (DC1)**. - -```{include} ../../deployment/snippets/user_csv_format.partial.md -:relative-images: -``` - -## {{arrows_counterclockwise}} Create and synchronise users - -Upload the user details `CSV` file to a sensible location on the SHM domain controller (recommended: `C:\Installation`). 
-This can be done by copying and pasting the file from your deployment device to the SHM DC. - -- Log into the **SHM primary domain controller** (`DC1-SHM-`) VM using the login credentials {ref}`stored in Azure Key Vault ` -- Open a `Powershell` command window with elevated privileges -- Run `C:\Installation\CreateUsers.ps1 ` -- This script will add the users and trigger synchronisation with Microsoft Entra ID -- It will still take around 5 minutes for the changes to propagate - -```{error} -If you get the message `New-ADUser : The specified account already exists` you should first check to see whether that user actually does already exist! -Once you're certain that you're adding a new user, make sure that the following fields are unique across all users in the Active Directory. - -- `SamAccountName` - - Specified explicitly in the `CSV` file. - - If this is already in use, consider something like `firstname.middle.initials.lastname` -- `DistinguishedName` - - Formed of `CN=,` by `Active Directory` on user creation. - - If this is in use, consider changing `DisplayName` from ` ` to ` ` . -``` - -```{danger} -- These domain administrator credentials have complete control over creating and deleting users as well as assigning them to groups -- Do not use them except where specified and never write them down! -- Be particularly careful never to use them to log in to any user-accessible VMs (such as the SRDs) -``` - -(adding_users_manually)= - -### {{woman}} {{man}} Modifying user SRE access - -Users may have been added to one or more {ref}`security_groups` through setting the `GroupName` field in the `user_details_template.csv` (see {ref}`generate_user_csv`). Security Group assignments can also be manually modified via the following: - -- Log into the **SHM primary domain controller** (`DC1-SHM-`) VM using the login credentials {ref}`stored in Azure Key Vault ` -- In Server Manager click `Tools > Active Directory Users and Computers` -- Click on `Safe Haven Security Groups` -- Find the group that the user needs to be added to (see {ref}`security_groups`) -- Right click on the group and click `Properties` -- Click the `Members` tab -- To add a user click `Add...` - - Enter a part of the user's name and click `Check Names` - - Select the correct user and click `OK`, then click `OK` again until the window closes -- To remove a user click on the username of the person and then `Remove` - - Click `Yes` if you're sure this user should no longer have access to this SRE, then click `OK` again until the window closes -- Open a `Powershell` command window with elevated privileges -- Run `C:\Installation\Run_ADSync.ps1` - -### {{iphone}} Edit user details - -The `DC1` is the source of truth for user details. If these details need to be changed, they should be changed in the `DC1` and then synchronised to Microsoft Entra ID. 
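-
-If you prefer the command line, the same kind of edit can be made from an elevated `Powershell` prompt on the `DC1` instead of the GUI steps below. The following is a minimal sketch only: the `jane.doe` account name and the phone number are placeholders, and it simply uses the standard `ActiveDirectory` module before triggering the usual synchronisation script.
-
-```powershell
-# Sketch: update a user's mobile number directly on the DC1 (placeholders shown)
-Import-Module ActiveDirectory
-Set-ADUser -Identity "jane.doe" -MobilePhone "+44 7700 900000"
-
-# Push the change to Microsoft Entra ID using the synchronisation helper script
-& "C:\Installation\Run_ADSync.ps1"
-```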
- -- Log into the **SHM primary domain controller** (`DC1-SHM-`) VM using the login credentials {ref}`stored in Azure Key Vault ` -- In Server Manager click `Tools > Active Directory Users and Computers` -- Click on `Safe Haven Research Users` -- Find the person, right click on them and select `Properties` -- To edit a **phone number**, select the `Telephones` tab and edit the `Mobile` number - - Click `OK` to save the new number - - Open a `Powershell` command window with elevated privileges - - Run `C:\Installation\Run_ADSync.ps1` -- To edit a user's **email** or their **username** (or first name or last name) you'll need to delete the user entirely and recreate them, meaning they'll have to set up their accounts (including MFA) again - - Find the person, right click on them and click `Delete` - - Click `OK` - - Open a `Powershell` command window with elevated privileges - - Run `C:\Installation\Run_ADSync.ps1` - - Create a new `CSV` file (or edit an existing one) with the correct user details (see {ref}`create_new_users`) - - Run `C:\Installation\CreateUsers.ps1 ` - - Run `C:\Installation\Run_ADSync.ps1` -- You can check the changes you made were successful by logging into the Azure Portal as the Microsoft Entra admin - - Open `Microsoft Entra ID` - - Click on `Users` under `Manage` and search for the user - - Click on the user and then `Edit properties` and confirm your changes propagated to Microsoft Entra ID - -(deleting_users)= - -### {{x}} Deleting users - -- Log into the **SHM primary domain controller** (`DC1-SHM-`) VM using the login credentials {ref}`stored in Azure Key Vault ` -- In Server Manager click `Tools > Active Directory Users and Computers` -- Click on `Safe Haven Research Users` -- Find the person, right click on them and click `Delete` -- Open a `Powershell` command window with elevated privileges -- Run `C:\Installation\Run_ADSync.ps1` -- You can check the user is deleted by logging into the Azure Portal as the AAD admin - - Open `Microsoft Entra ID` - - Click on `Users` under `Manage` and search for the user - - Confirm the user is no longer present - -### {{x}} Automatically deleting all unassigned users - -In some situations, such as at the end of a project after an SRE has been torn down, you may want to remove all users from the SHM who are not assigned to the security group of any remaining attached SREs. - -- Ensure you have the same version of the Data Safe Haven repository as was used by your deployment team -- Open a `Powershell` terminal and navigate to the `deployment/administration` directory within the Data Safe Haven repository -- Run `./SHM_Delete_Unassigned_Users.ps1 -shmId ` (use the `-dryRun` flag to see who would get deleted without performing the deletion) - -## {{calling}} Assign MFA licences - -### {{hand}} Manually add licence to each user - -- Log into the Azure Portal and connect to the correct Microsoft Entra ID -- Open `Microsoft Entra ID` -- Select `Manage > Licenses > All Products` -- Click `Azure Active Directory Premium P1` -- Click `Assign` -- Click `Users and groups` -- Select the users you have recently created and click `Select` -- Click `Assign` to complete the process - -```{note} -`Azure Active Directory Premium P1` is being renamed to `Microsoft Entra ID P1` and may appear as such when performing the assignment process in future.
-``` - -### {{car}} Automatically assign licences to users - -To automatically assign licences to all local `Active Directory` users that do not currently have a licence in `Microsoft Entra ID`: - -- Ensure you have the same version of the Data Safe Haven repository as was used by your deployment team -- Open a `Powershell` terminal and navigate to the `deployment/administration` directory within the Data Safe Haven repository -- Run the `./SHM_Add_AAD_Licences.ps1 -tenantId ` script, where `` is the ID of the Azure tenant belonging to the SHM you want to add the licences to - -## {{running}} User activation - -We recommend using email to send connection details to new users. - -```{note} -This is not a security risk since: -- we are not sending passwords in this email -- the user needs access to their previously-provided phone number in order to set their account password and MFA -``` - -A sample email might look like the following - -> Dear \, -> -> Welcome to \! You've been given access to a Data Safe Haven managed by \. -> Please find a PDF version of our user guide attached. -> You should start by following the instructions about setting up your account and enabling multi-factor authentication (MFA). -> -> Your username is: \ -> Your Safe Haven is hosted at: \ -> -> The Safe Haven is only accessible from certain networks and may also involve physical location restrictions. -> -> --details about network and location/VPN restrictions here-- - -(modifying_network)= - -## {{globe_with_meridians}} Changing user network access restrictions - -One of the controls used by Tier 2/3 SREs is to restrict access based on network addresses. -The network addresses that are allowed to access an SRE can be modified after deployment. -This is useful if users require access from new, or different, IP addresses. -For example if their institutional IP address changes, or an additional location is approved. - -- In the Azure Portal, navigate to `RG_SHM__SRE__NETWORKING` -- On the `Overview` tab, navigate to `NSG_SHM__SRE__GUACAMOLE`, the Network Security Group for the remote desktop service -- Navigate to `Inbound Security Rules`, and open the entry called `AllowUsersApprovedHttpsInbound` -- Update the `Source IP addresses/CIDR ranges` field to include IP addresses that should be able to access the SRE and remove any that should not -- Users will now be able to access the remote desktop interface from only the desired IP addresses - -## {{construction_worker}} Common user problems - -One of the most common user issues is that they are unable to log in to the environment. -Here we go through the login procedure and discuss possible problems at each step - -### {{waning_crescent_moon}} Expired webclient certificate - -If the certificate for the SRE domain has expired, users will not be able to login. - -```{image} administrator_guide/login_certificate_expiry.png -:alt: Login failure - expired certificate -:align: center -``` - -```{tip} -**Solution**: Replace the SSL certificate with a new one - -- {ref}`renew_ssl` -``` - -### {{red_circle}} Unable to log into remote desktop gateway - -If users give the wrong username or password they will not be able to progress past the login screen. - -```{image} administrator_guide/login_password_login.png -:alt: Login failure - wrong password -:align: center -``` - -```{tip} -**Solution**: Check user credentials, password may need to be reset. 
-``` - -### {{see_no_evil}} Unable to see SRD or SSH connection options - -After logging in with Microsoft, users can't see the option to log into the SRE via the SRD or SSH options. - -```{image} administrator_guide/no_recent_connections.png -:alt: Unable to see SRD or SSH connection options -:align: center -``` - -```{tip} -**Solution**: Ensure the user is added to the correct Security Group for the SRE - -- See {ref}`adding_users_manually` -``` - -### {{broken_heart}} Xorg login failure on the SRD - -If users can get to the login screen: - -```{image} administrator_guide/srd_login_prompt.png -:alt: SRD login screen -:align: center -``` - -but then see this error message: - -```{image} administrator_guide/srd_login_failure.png -:alt: SRD login failure -:align: center -``` - -there are a couple of possible causes. - -```{error} -**Problem**: the username or password was incorrectly entered - -**Solution**: check username and password - -- Confirm that the username and password have been correctly typed -- Confirm that there are no unsupported special characters in the password -- Reset the account if there is no other solution -``` - -```{error} -**Problem**: the computer is unable to communicate with the login server - -**Solution**: run diagnostics - -- This can happen for a variety of reasons (DNS problems, broken services on the SRD etc.) -- Run the script under `deployment/administration/SRE_SRD_Remote_Diagnostics.ps1`, providing the group and last IP octet of the problematic SRD -- This will run a series of diagnostics intended to fix some common problems including - - LDAP configuration - - DNS configuration - - SSS configuration - - File mounting configuration -``` - -### {{nut_and_bolt}} Password reset failure - -When creating an account or resetting a password, the users get the following screen: - -```{image} administrator_guide/password_reset_failure.png -:alt: Password reset failure -:align: center -``` - -```{error} -**Problem**: the password could not be reset - -**Solution**: remove and re-add the password reset configuration on the DC1 - -- Log into the **SHM primary domain controller** (`DC1-SHM-`) VM using the login credentials {ref}`stored in Azure Key Vault ` -- Open a `Powershell` command window with elevated privileges -- Run `$aadConnector = Get-ADSyncConnector | ? {$_.Name -match "onmicrosoft.com - AAD"}` -- Run `Remove-ADSyncAADPasswordResetConfiguration -Connector $aadConnector.Name` -- Run `Set-ADSyncAADPasswordResetConfiguration -Connector $aadConnector.Name -Enable $true` -- Check the configuration is reset by running `Get-ADSyncAADPasswordResetConfiguration -Connector $aadConnector.Name` -- Ask the user to reset their password again -``` - -### {{cloud}} Unable to install from package mirrors - -If it is not possible to install packages from the package mirrors then this may be for one of the following reasons: - -```{error} -**Problem**: Mirror VNet is not correctly peered - -**Solution**: Re-run the network configuration script. - -On your **deployment machine**. - -- Ensure you have the same version of the Data Safe Haven repository as was used by your deployment team -- Open a `Powershell` terminal and navigate to the `deployment/secure_research_environment/setup` directory within the Data Safe Haven repository -- Ensure you are logged into `Azure` within `Powershell` using the command: `Connect-AzAccount` - - NB. 
If your account is a guest in additional Azure tenants, you may need to add the `-Tenant ` flag, where `` is the ID of the Azure tenant you want to deploy into. -- Run the `./Apply_Network_Configuration.ps1 -sreId ` script, where the SRE ID is the one specified in the config -``` - -````{error} -**Problem**: Internal mirror does not have the required package - -**Solution**: Check package availability - -To diagnose this, log into the `Internal` mirror using the Serial Console through the `Azure` portal. -Check the packages directory (i.e. `/datadrive/mirrordaemon/pypi/web/packages` for PyPI or `/datadrive/mirrordaemon/www/cran` for CRAN) - -```{image} administrator_guide/internal_mirror_packages.png -:alt: Internal mirror package list -:align: center -```` - -If the requested package **should** be available (i.e. it is on the appropriate allowlist), then you can force a mirror update by rebooting the `EXTERNAL` mirrors. -This will trigger the following actions: - -1. Synchronisation of the external mirror with the remote, internet repository (a `pull` update) -2. Synchronisation of the internal mirror with the external mirror (a `push` update) - -This may take an hour or two but should solve the missing package problem. diff --git a/docs/source/roles/system_manager/manage_webapps.md b/docs/source/roles/system_manager/manage_webapps.md deleted file mode 100644 index 9f739a7bb9..0000000000 --- a/docs/source/roles/system_manager/manage_webapps.md +++ /dev/null @@ -1,44 +0,0 @@ -(administrator_manage_webapps)= - -# Managing web applications - -```{important} -This document assumes that you already have access to a {ref}`Safe Haven Management (SHM) environment ` and one or more {ref}`Secure Research Environments (SREs) ` that are linked to it. -``` - -During deployment of an SRE, distinct virtual machines are created to host each of the two standard web applications - `CodiMD` and `Gitlab`. - -In principle, these should require no further direct interaction. Researchers using Secure Research Desktops will be able to interact with the servers through a web interface. `CodiMD` and `Gitlab` authenticate with the domain controller via LDAP. - -However, it is possible for the virtual machine hosting the web app servers to successfully start without the web app servers themselves actually running. For example, Researchers using an `SRD` may find that the web apps are unavailable, or do not successfully authenticate log-in attempts. In such cases, command line access to the virtual machines hosting the web app servers may help to diagnose and resolve problems. - -In the rest of this document, `` is the {ref}`Secure Management Environment ID ` for the SHM, and `` is the {ref}`Secure Research Environment ID ` for the SRE. - -## Checking build logs - -An initial step could be to check the build logs of the virtual machine to ascertain whether any clear errors occurred during the process (e.g. the installation of the server software may have failed). - -- From the `Azure` portal, navigate to the web app resource group `RG_SHM__SRE__WEBAPPS`. -- Click on the relevant VM (e.g. `CODIMD-SRE-`) -- From the menu on the left, scroll down to the `Help` section and select `Boot diagnostics` -- Click `Serial log` to access a full text log of the booting up of the VM. - -From the log, you may be able to determine whether and why part of the build process failed. In some cases it may be sufficient to delete and rebuild the VM. 
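-
-As an alternative to the portal steps below, the VM can also be removed from your deployment machine with the `Az` `Powershell` module. This is a rough sketch only: the resource group and VM names are placeholders that you should replace with the values shown for your SRE in the Azure Portal.
-
-```powershell
-# Sketch: delete a broken web app VM before redeploying it (placeholders shown)
-$resourceGroup = "<webapps resource group for this SRE>"
-$vmName        = "<web app VM name, e.g. the CodiMD VM>"
-
-Connect-AzAccount   # log in first if you have not already done so
-Remove-AzVM -ResourceGroupName $resourceGroup -Name $vmName -Force
-```
-
-Note that `Remove-AzVM` deletes only the VM resource itself, so any leftover disks or network interfaces may still need to be cleaned up in the portal before redeploying.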
- -- From the menu on the left, click `Overview` -- Click `Delete` -- Redeploy the web app servers using `Powershell` locally - -![Powershell: five minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=five%20minutes) at {{file_folder}} `./deployment/secure_research_environment/setup` - -```powershell -PS> ./Setup_SRE_WebApp_Servers.ps1 -``` - -If the reason for failure is less clear, accessing the command line interface directly may help. - -## Accessing the VM console - -```{include} snippets/01_console.partial.md -:relative-images: -``` diff --git a/docs/source/roles/system_manager/migrate_an_shm.md b/docs/source/roles/system_manager/migrate_an_shm.md deleted file mode 100644 index 0feb2c8648..0000000000 --- a/docs/source/roles/system_manager/migrate_an_shm.md +++ /dev/null @@ -1,234 +0,0 @@ -# Migrating an SHM - -These instructions will walk you through migrating an existing Safe Haven Management (SHM) environment to a newer **Data Safe Haven** release and migrating the users to it. - -```{note} -This document assumes that you have already deployed a {ref}`Safe Haven Management (SHM) environment ` and one or more {ref}`Secure Research Environments (SREs) ` that are linked to it. -``` - -```{danger} -This is a complex operation and may not always work. You may lose data or user accounts in the migration process. -``` - -```{include} ../../deployment/snippets/00_symbols.partial.md -:relative-images: -``` - -## 1. {{seedling}} Prerequisites - -- All of the {ref}`Safe Haven Management (SHM) environment ` prerequisites - -The following variables will be used during deploying - -- ``: the {ref}`management environment ID ` for the previously deployed SHM -- ``: the {ref}`management environment ID ` for the new SHM you want to deploy -- ``: the {ref}`Tenant ID ` for the `Microsoft Entra ID` that your previously deployed SHM is connected to - -## 2. {{unlock}} Disconnect the old domain controller from the Microsoft Entra ID - -![Remote: one minute](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-onedrive&label=remote&color=blue&message=one%20minute) - -- Log into the **SHM primary domain controller** for the old SHM (`DC1-SHM-`) VM using the `private IP address`, `` and `` from the portal -- Open `Powershell` as an administrator - - Navigate to `C:\Installation` - - Run `.\Disconnect_AD.ps1` - - You will need to provide login credentials (including MFA if set up) for `@` - -```{warning} -Do not attempt to add users to the old SHM after this point as they will not be synchronised to the `Microsoft Entra ID`! -``` - -```{attention} -Full disconnection of the `Microsoft Entra ID` can take up to 72 hours but will typically take around one day. -``` - -## 3. {{clipboard}} Safe Haven Management configuration - -- Create a copy of the configuration file for your previous SHM -- You may want to change some of the following attributes: - -```json -{ - "azure": { - "subscriptionName": "Azure subscription to deploy the SHM into. You might want to use a different subscription than for your previous SHM." - }, - "shmId": "The for the new SHM. If you try to deploy two SHMs with the same ID into the same subscription some resources will not deploy correctly." -} -``` - -## 4. 
{{door}} Configure DNS for the custom domain - -![Powershell: a few minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=a%20few%20minutes) at {{file_folder}} `./deployment/safe_haven_management_environment/setup` - -See the {ref}`Safe Haven Management documentation ` for more details. - -## 5. {{file_folder}} Ensure the Microsoft Entra domain is registered - -![Powershell: a few minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=a%20few%20minutes) at {{file_folder}} `./deployment/safe_haven_management_environment/setup` - -See the {ref}`Safe Haven Management documentation ` for more details. - -```{note} -You will need to use a Microsoft Entra global admin when the `AzureAD` module asks you to sign-in. -``` - -## 6. {{key}} Deploy Key Vault for SHM secrets and create emergency admin account - -![Powershell: ten minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=ten%20minutes) at {{file_folder}} `./deployment/safe_haven_management_environment/setup` - -See the {ref}`Safe Haven Management documentation ` for more details. - -```{note} -You will need to use a Microsoft Entra global admin when the `AzureAD` module asks you to sign-in. -``` - -## 7. {{station}} Deploy network and VPN gateway - -![Powershell: twenty minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=twenty%20minutes) at {{file_folder}} `./deployment/safe_haven_management_environment/setup` - -See the {ref}`Safe Haven Management documentation ` for more details. - -## 8. {{house_with_garden}} Deploy the domain controllers - -![Powershell: one hour](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=one%20hour) at {{file_folder}} `./deployment/safe_haven_management_environment/setup` - -See the {ref}`Safe Haven Management documentation ` for more details. - -```{important} -Do **not** run any of the domain controller configuration steps yet -``` - -## 9. {{zap}} Configure the new domain controllers - -### {{lock_with_ink_pen}} Suspend MFA for all users - -![Microsoft Entra ID: under a minute](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-academic&label=Azure%20AD&color=blue&message=under%20a%20minute) - -- From the `Azure` portal, navigate to the AAD. 
-- Click `Security` in the left hand sidebar -- Click `Conditional access` in the left hand sidebar -- Click the `Require MFA` policy from the policy list - - Toggle `Enable policy` to `Off` - - Click the `Save` button - -### {{busts_in_silhouette}} Copy SHM users from old domain controller - -![Powershell: five minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=five%20minutes) at {{file_folder}} `./deployment/safe_haven_management_environment/setup` - -```powershell -PS> ./Copy_SHM_Users.ps1 -oldShmId -newShmId -``` - -- where `` is the {ref}`management environment ID ` for the previously deployed SHM -- where `` is the {ref}`management environment ID ` for this SHM - -### {{anchor}} Reset Azure AD source anchors - -![Remote: ten minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-onedrive&label=remote&color=blue&message=ten%20minutes) - -- Log into the **SHM primary domain controller** (`DC1-SHM-`) VM that you have just deployed using the `private IP address`, `` and `` that you obtained from the portal - -Run the following `Powershell` commands - -```powershell -# Get local users -$userOuPath = (Get-ADObject -Filter * | Where-Object { $_.Name -eq "Safe Haven Research Users" }).DistinguishedName -$users = Get-ADUser -Filter * -SearchBase "$userOuPath" -Properties * - -# Connect to Microsoft Entra ID -# Use the credentials for a Microsoft Entra global admin (eg. `aad.admin.firstname.surname@`) -Connect-MsolService - -# Reset source anchor for Microsoft Entra users -foreach ($user in $users) { - $immutableId = [System.Convert]::ToBase64String($user.ObjectGUID.ToByteArray()) - Set-MsolUser -UserPrincipalName $($user.UserPrincipalName) -immutableID $immutableId - Write-Output "Set source anchor for $($user.UserPrincipalName) to $immutableId" -} -``` - -```{note} -All research users in this SHM will have to go to `https://aka.ms/sspr` to reset their passwords although their MFA configuration will stay the same. -``` - -### {{train}} Install Microsoft Entra Connect - -![Remote: ten minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-onedrive&label=remote&color=blue&message=ten%20minutes) - -See the {ref}`Safe Haven Management documentation ` for more details. - -````{include} snippets/02_ms_entra_connect.partial.md -:relative-images: -```` - -````{error} -Since you are trying to connect the new SHM to an Microsoft Entra ID that was already synchronised, you may find the `AzureADConnect` installation fails due to a `Directory synchronisation failure`. - -```{image} migrate_shm/aad_connection_failure.png -:alt: AAD connection failure -:align: center -``` - -If this happens then you will need to wait for the previous disconnection to complete, which may take up to 72 hours. -```` - -### {{recycle}} Update Microsoft Entra Connect rules - -![Remote: one minute](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-onedrive&label=remote&color=blue&message=one%20minute) - -See the {ref}`Safe Haven Management documentation ` for more details. - -### {{put_litter_in_its_place}} Unregister the old domain controller in Microsoft Entra ID - -![Microsoft Entra ID: one minute](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-academic&label=Microsoft%20Entra%20ID&color=blue&message=one%20minute) - -- From the `Azure` portal, navigate to the Microsoft Entra ID you have created. 
-- Select `Microsoft Entra Connect` from the left hand menu -- Select `Connect Sync` from the left hand menu -- Under `Health And Analytics` click `Microsoft Entra Connect Health` -- Select `Sync services` from the left hand menu -- Click on `.onmicrosoft.com` -- Click on the `Microsoft Entra Connect Server` that corresponds to the **old** DC (marked as `Unhealthy`) -- Click `Delete` in the top bar, type the server name when prompted then click `Delete` - -### {{ballot_box_with_check}} Validate Active Directory synchronisation - -![Remote: one minute](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-onedrive&label=remote&color=blue&message=one%20minute) - -See the {ref}`Safe Haven Management documentation ` for more details. - -## 10. {{police_car}} Deploy and configure network policy server - -![Powershell: twenty minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=twenty%20minutes) at {{file_folder}} `./deployment/safe_haven_management_environment/setup` - -See the {ref}`Safe Haven Management documentation ` for more details. - -## 11. {{closed_lock_with_key}} Require MFA for all users - -![Microsoft Entra ID: a few minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-academic&label=Microsoft%20Entra%20ID&color=blue&message=a%20few%20minutes) - -See the {ref}`Safe Haven Management documentation ` for more details. - -## 12. {{no_pedestrians}} Block portal access for normal users - -![Azure AD: a few minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=microsoft-academic&label=Azure%20AD&color=blue&message=a%20few%20minutes) - -See the {ref}`Safe Haven Management documentation ` for more details. - -## 13. {{package}} Deploy Python/R package repositories - -![Powershell: thirty minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=thirty%20minutes) at {{file_folder}} `./deployment/safe_haven_management_environment/setup` - -See the {ref}`Safe Haven Management documentation ` for more details. - -## 14. {{chart_with_upwards_trend}} Deploy logging - -![Powershell: a few minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=a%20few%20minutes) at {{file_folder}} `./deployment/safe_haven_management_environment/setup` - -See the {ref}`Safe Haven Management documentation ` for more details. - -## 15. {{fire_engine}} Deploy firewall - -![Powershell: ten minutes](https://img.shields.io/static/v1?style=for-the-badge&logo=powershell&label=local&color=blue&message=ten%20minutes) at {{file_folder}} `./deployment/safe_haven_management_environment/setup` - -See the {ref}`Safe Haven Management documentation ` for more details. diff --git a/docs/source/roles/system_manager/migrate_shm/aad_connection_failure.png b/docs/source/roles/system_manager/migrate_shm/aad_connection_failure.png deleted file mode 100644 index 74f657ceff..0000000000 Binary files a/docs/source/roles/system_manager/migrate_shm/aad_connection_failure.png and /dev/null differ diff --git a/docs/source/roles/system_manager/snippets/01_console.partial.md b/docs/source/roles/system_manager/snippets/01_console.partial.md deleted file mode 100644 index 50e5a1ac0d..0000000000 --- a/docs/source/roles/system_manager/snippets/01_console.partial.md +++ /dev/null @@ -1,17 +0,0 @@ -Console access to the SRE VMs, including those for each web app and the `compute` VM, can be achieved through the `Azure` portal. 
All VMs share the same ``, but each has its own ``, which will need to be retrieved from the `SRE` key vault before accessing the console. - -- From the `Azure` portal, navigate to the Resource Group `RG_SHM__SRE__SECRETS` -- Click on the `SRE` keyvault `kv-_SRE_` -- From the menu on the left, select `Secrets` from the `Objects` section. -- All VMs share the same ``, found in the `sre--vm-admin-username` secret. -- Each VM has its own ``, found in the `sre--vm-admin-password-` secret. - -Once you have the `` and ``, you will be able to log in to the VM console as follows: - -- From the `Azure` portal, navigate to the correct resource group: - - `RG_SHM__SRE__WEBAPPS` for the web applications - - `RG_SHM__SRE__COMPUTE` for the compute VM -- Click on the relevant VM -- From the menu on the left, scroll down to the `Help` section and select `Serial console` -- After a short time, you will be shown the console for the VM. You may need to press a key to be shown the login prompt. -- Log in with the details you retrieved earlier to be given root access to the VM. diff --git a/docs/source/roles/system_manager/snippets/02_ms_entra_connect.partial.md b/docs/source/roles/system_manager/snippets/02_ms_entra_connect.partial.md deleted file mode 100644 index aafeda4d79..0000000000 --- a/docs/source/roles/system_manager/snippets/02_ms_entra_connect.partial.md +++ /dev/null @@ -1,3 +0,0 @@ -````{note} -Microsoft Entra Connect is the new name for Azure AD Connect. However, although all Microsoft documentation and entries in the Azure portal now refer to Microsoft Entra Connect, as of this release of the Data Safe Haven, the software itself is still named `Azure AD Connect`. It will appear as such on your Domain Controller. -```` diff --git a/environment_configs/package_lists/allowlist-core-python-pypi-tier3.list b/environment_configs/package_lists/allowlist-core-python-pypi-tier3.list deleted file mode 100644 index ebb35dffc0..0000000000 --- a/environment_configs/package_lists/allowlist-core-python-pypi-tier3.list +++ /dev/null @@ -1,142 +0,0 @@ -aero-calc -aesara -altair -annoy -arviz -beautifulsoup4 -black -blosc -bokeh -bulwark -Cartopy -catboost -colorama -cons -Cython -dash -dask -Deprecated -descartes -dill -distributed -dtw -eli5 -etuples -Fiona -flair -flake8 -Flask -Flask-Bootstrap -folium -GDAL -gensim -geopandas -geoplot -GPy -GPyOpt -gym -h5py -html5lib -imageio -ipykernel -Jinja2 -joblib -jupyter -jupyterlab -keras -Keras-Applications -kiwisolver -langdetect -lifelines -lightgbm -logical-unification -lxml -mapclassify -Markdown -matplotlib -miniKanren -mock -mpctools -multipledispatch -mypy -NavPy -nltk -nose -numpy -numpydoc -odo -pandas -pandas-datareader -pandas-profiling -pandasql -pathos -pdfminer -pg8000 -Pillow -Pint -plotly -poetry -prophet -psycopg2 -pycosat -pycurl -pydot -pygrib -pyLDAvis -pylint -pymc3 -pymongo -pyodbc -PyOpenGL -pyproj -pyshp -pystan -pytest -python-dateutil -python-geohash -python-louvain -PyYAML -regex -repro-catalogue -requests -rpy2 -Rtree -safety -scikit-image -scikit-learn -scikit-multilearn -scikit-survival -scikit-validate -scipy -seaborn -Shapely -six -sklearn-contrib-lightning -spacy -spacy-langdetect -Sphinx -SQLAlchemy -statsmodels -suod -sympy -syntok -tables -tdda -tensorboard -tensorflow -tensorflow-gpu -thinc -tika -tokenizers -torch -torchvision -tqdm -transformers -tsfresh -Twisted -unicodecsv -vincent -wordcloud -xgboost -xlrd -XlsxWriter diff --git a/environment_configs/package_lists/allowlist-core-r-cran-tier3.list 
b/environment_configs/package_lists/allowlist-core-r-cran-tier3.list deleted file mode 100644 index 7fc527f500..0000000000 --- a/environment_configs/package_lists/allowlist-core-r-cran-tier3.list +++ /dev/null @@ -1,89 +0,0 @@ -arrow -BiocManager -car -caret -colorspace -csv -data.table -dataCompareR -DataExplorer -DBI -devtools -DiagrammeR -DT -dygraphs -esquisse -foreign -ggforce -ggmap -ggvis -glmnet -googleVis -haven -htmlwidgets -httr -IRkernel -janitor -jsonlite -kdensity -knitr -leaflet -lme4 -maps -maptools -mgcv -mlr3 -MSSQL -multcomp -networkD3 -nlme -odbc -parsnip -plotly -pls -plumber -polycor -profvis -prophet -quanteda -quantmod -randomForest -ranger -RColorBrewer -Rcpp -RCurl -readxl -reshape2 -rgl -RMariaDB -rmarkdown -RMySQL -roxygen2 -RPostgres -RSQLite -rstan -scales -shiny -sp -sqldf -survival -testthat -text2vec -threeJS -tidymodels -tidytext -tidyverse -tidyxl -tmap -tsbox -validate -vcd -vroom -xgboost -XLConnect -xlsx -XML -xtable -xts -yaml -zoo diff --git a/environment_configs/package_lists/allowlist-full-python-pypi-tier3.list b/environment_configs/package_lists/allowlist-full-python-pypi-tier3.list deleted file mode 100644 index ff94b97627..0000000000 --- a/environment_configs/package_lists/allowlist-full-python-pypi-tier3.list +++ /dev/null @@ -1,773 +0,0 @@ -absl-py -aero-calc -aesara -affine -aiobotocore -aiocontextvars -aiofiles -aiohttp -aioitertools -aiosignal -aiosqlite -alabaster -altair -altgraph -annotated-types -annoy -ansi2html -ansimarkup -anyio -apispec -appdirs -appnope -arabic-reshaper -argcomplete -argon2-cffi -argon2-cffi-bindings -argparse -arviz -asn1crypto -astor -astroid -asttokens -astunparse -async_generator -async-lru -async-timeout -asyncio -asynctest -atomicwrites -attrs -Authlib -autocommand -autograd -autograd-gamma -Automat -awkward -awkward-cpp -awkward0 -Babel -backcall -backpack -backports.cached-property -backports.entry-points-selectable -backports.functools-lru-cache -backports.lzma -backports.os -backports.shutil_get_terminal_size -backports.shutil_which -backports.tempfile -backports.weakref -backports.zoneinfo -bcdoc -beautifulsoup4 -better-exceptions-fork -biscuits -black -bleach -blinker -blis -blosc -blosc2 -bokeh -boltons -boto3 -botocore -Bottleneck -bpemb -branca -Brotli -brotlicffi -bson -build -bulwark -CacheControl -cached-property -cachetools -cachy -Cartopy -catalogue -catboost -certifi -cffi -cftime -characteristic -chardet -charset-normalizer -clang -cleo -click -click-plugins -cligj -clikit -clint -cloudpathlib -cloudpickle -cmake -cmarkgfm -cmdstanpy -colorama -comm -commonmark -confection -configparser -conllu -cons -constantly -contextily -contextlib2 -contextvars -contoml -contourpy -convertdate -cookies -cramjam -crashtest -crcmod -cryptography -cssselect2 -cycler -cymem -Cython -dacite -dash -dash-core-components -dash-html-components -dash-table -dask -dataclasses -datasets -ddt -debugpy -decorator -defusedxml -deprecat -Deprecated -deprecation -descartes -dill -distlib -distributed -dm-tree -dnspython -docopt -docutils -dparse -dragonmapper -dtw -dulwich -ecos -eli5 -entrypoints -enum34 -ephem -etuples -exceptiongroup -executing -fastjsonschema -fastprogress -filelock -fiona -first -flair -flake8 -Flask -Flask-Bootstrap -Flask-Compress -flatbuffers -folium -fonttools -formulaic -freetype-py -frozenlist -fsspec -fst-pso -ftfy -funcsigs -functools32 -funcy -future -future-fstrings -futures -FuzzyTM -gast -GDAL -gdown -gensim -geographiclib -geopandas -geoplot -geopy -gitdb -gitdb2 -GitPython -glob2 
-gnureadline -google-api-core -google-auth -google-auth-httplib2 -google-auth-oauthlib -google-cloud-core -google-cloud-storage -google-crc32c -google-pasta -google-resumable-media -googleapis-common-protos -GPy -GPyOpt -graphlib-backport -graphviz -greenlet -grpcio -gym -h11 -h2 -h5netcdf -h5py -HeapDict -hijri-converter -holidays -hotelling -hpack -hstspreload -html5lib -htmlmin -httpcore -httplib2 -httpstan -httpx -huggingface-hub -hyperframe -hyperlink -hyperopt -idna -idna_ssl -imagecodecs -imagecodecs-lite -ImageHash -imageio -imagesize -importlib -importlib-metadata -importlib-resources -incremental -inflect -iniconfig -installer -interface-meta -ipaddress -ipykernel -ipython -ipython_genutils -isort -itsdangerous -Janome -jaraco.apt -jaraco.classes -jaraco.collections -jaraco.context -jaraco.functools -jaraco.structures -jaraco.text -jaraco.timing -jaraco.ui -jaraco.windows -jarowinkler -jax -jedi -jeepney -Jinja2 -jmespath -joblib -js-regex -json5 -jsonschema -jsonschema-specifications -jupyter -jupyter-client -jupyter-core -jupyter-events -jupyter-lsp -jupyter-packaging -jupyter-server -jupyter-server-fileid -jupyter-server-terminals -jupyter-server-ydoc -jupyter-telemetry -jupyter-ydoc -jupyterlab -jupyterlab-launcher -jupyterlab-pygments -jupyterlab-server -jwt -keras -Keras-Applications -keras-nightly -Keras-Preprocessing -keyring -kiwisolver -konoha -korean-lunar-calendar -langcodes -langdetect -lapsolver -lazy_loader -lazy-object-proxy -libclang -lifelines -lightgbm -lit -llvmlite -locket -lockfile -logical-unification -loguru -LunarCalendar -lxml -lz4 -macholib -mapclassify -Markdown -markdown-it-py -markdown2 -MarkupSafe -marshmallow -matplotlib -matplotlib-inline -matrixprofile -mccabe -mdit-py-plugins -mdurl -memory-profiler -mercantile -miniKanren -missingno -mistune -ml-dtypes -mock -modutil -monotonic -more-itertools -mpctools -mpld3 -mpmath -msgpack -msgpack-python -multidict -multimethod -multipledispatch -multiprocess -multiprocessing -munch -murmurhash -mypy -mypy-extensions -namex -NavPy -nbclassic -nbclient -nbconvert -nbformat -ndindex -nest-asyncio -netCDF4 -networkx -nh3 -nltk -nose -notebook -notebook-shim -notifiers -numba -numexpr -numpy -numpydoc -nvidia-cublas-cu11 -nvidia-cublas-cu12 -nvidia-cuda-cupti-cu11 -nvidia-cuda-cupti-cu12 -nvidia-cuda-nvrtc-cu11 -nvidia-cuda-nvrtc-cu12 -nvidia-cuda-runtime-cu11 -nvidia-cuda-runtime-cu12 -nvidia-cudnn-cu11 -nvidia-cudnn-cu12 -nvidia-cufft-cu11 -nvidia-cufft-cu12 -nvidia-curand-cu11 -nvidia-curand-cu12 -nvidia-cusolver-cu11 -nvidia-cusolver-cu12 -nvidia-cusparse-cu11 -nvidia-cusparse-cu12 -nvidia-nccl-cu11 -nvidia-nccl-cu12 -nvidia-nvjitlink-cu12 -nvidia-nvtx-cu11 -nvidia-nvtx-cu12 -oauth2client -oauthlib -odo -olefile -opencv-python -opt-einsum -optree -ordereddict -orderedmultidict -oscrypto -osqp -overrides -packagebuilder -packaging -pandas -pandas-datareader -pandas-profiling -pandasql -pandocfilters -paramz -parso -partd -passlib -pastel -path -path.py -pathlib -pathlib-abc -pathlib2 -pathos -pathspec -pathy -patsy -pbr -pdfminer -pefile -pep517 -pep8 -pexpect -pg8000 -phik -pickleshare -pillow -Pint -pip -pip-api -pip-shims -pip-tools -pipenv -pipreqs -pkgconfig -pkginfo -pkgutil_resolve_name -plac -platformdirs -plette -plotly -pluggy -plumbum -poetry -poetry-core -poetry-plugin-export -pooch -pox -ppft -pprintpp -pptree -preshed -prettytable -prometheus-client -prompt-toolkit -prophet -proto-plus -protobuf -psutil -psycopg2 -ptyprocess -pure-eval -py -py-cpuinfo -py4j -pyarrow -pyarrow-hotfix -pyasn1 
-pyasn1-modules -pybind11 -pycairo -pycodestyle -pycosat -pycparser -pycrdt -pycurl -pydantic -pydantic-core -pydot -pyflakes -pyFUME -Pygments -pygrib -PyHamcrest -pyHanko -pyhanko-certvalidator -pyinstaller -pyinstaller-hooks-contrib -pyLDAvis -pylev -pylint -pymc3 -PyMeeus -pymongo -pyodbc -PyOpenGL -pyparsing -pypdf -PyPDF2 -PyPDF3 -pypiwin32 -pypng -pyproj -pyproject_hooks -pyrsistent -pysha3 -pyshp -pysimdjson -pystan -pytest -pytest-pylint -python-barcode -python-bidi -python-dateutil -python-geohash -python-gitlab -python-json-logger -python-louvain -python-pkcs11 -pytoml -pytorch_revgrad -pytorch-pretrained-bert -pytorch-transformers -pytz -pytz-deprecation-shim -PyWavelets -pywin32 -pywin32-ctypes -pywinpty -PyYAML -pyzmq -qdldl -qrcode -rapidfuzz -rasterio -readme-renderer -referencing -regex -reportlab -repoze.lru -repro-catalogue -requests -requests-file -requests-ftp -requests-oauthlib -requests-toolbelt -requests-unixsocket -requirements-parser -requirementslib -responses -retrying -rfc3339-validator -rfc3986 -rfc3986-validator -rfc3987 -rich -rlPyCairo -rpds-py -rpy2 -rsa -Rtree -ruamel.base -ruamel.ordereddict -ruamel.yaml -ruamel.yaml.clib -s3fs -s3transfer -sacremoses -safetensors -safety -safety-schemas -scandir -scikit-image -scikit-learn -scikit-multilearn -scikit-survival -scikit-validate -scipy -scramp -seaborn -SecretStorage -segtok -semver -Send2Trash -sentencepiece -setuptools -setuptools-git -setuptools-scm -shapely -shellingham -simpful -simplegeneric -simplejson -singledispatch -six -sklearn -sklearn-contrib-lightning -smart-open -smmap -smmap2 -sniffio -snowballstemmer -snuggs -sortedcollections -sortedcontainers -soupsieve -spacy -spacy-langdetect -spacy-legacy -spacy-loggers -sparsesvd -Sphinx -sphinxcontrib-applehelp -sphinxcontrib-devhelp -sphinxcontrib-htmlhelp -sphinxcontrib-jsmath -sphinxcontrib-qthelp -sphinxcontrib-serializinghtml -sphinxcontrib-websupport -sputnik -SQLAlchemy -sqlitedict -srsly -stack-data -stanio -statsmodels -stumpy -subprocess32 -suod -svglib -sympy -syntok -tables -tabulate -tangled-up-in-unicode -tb-nightly -tblib -tdda -tempora -tenacity -tensorboard -tensorboard-data-server -tensorboard-plugin-wit -tensorflow -tensorflow-cpu-aws -tensorflow-estimator -tensorflow-gpu -tensorflow-gpu-estimator -tensorflow-intel -tensorflow-io-gcs-filesystem -tensorflow-tensorboard -termcolor -termcolor-whl -terminado -testpath -tf-estimator-nightly -tf-keras -tf-keras-nightly -tf-nightly -tf-nightly-cpu-aws -tf-nightly-intel -tf-nightly-macos -Theano -Theano-PyMC -thinc -threadpoolctl -tifffile -tika -tiny-tokenizer -tinycss2 -tokenize-rt -tokenizers -toml -tomli -tomlkit -toolz -torch -torchvision -tornado -tqdm -traitlets -transformer-smaller-training-vocab -transformers -triton -trove-classifiers -tsfresh -twine -Twisted -twisted-iocpsupport -typed-ast -typeguard -typer -types-docutils -types-PyYAML -types-setuptools -types-toml -typing -typing-extensions -typing-utils -tzdata -tzlocal -ujson -unicodecsv -unification -unittest2 -uproot -uproot-methods -uritools -urllib3 -uvloop -vincent -virtualenv -virtualenv-clone -visions -vistir -wasabi -wcwidth -weasel -webargs -webencodings -websocket-client -Werkzeug -wheel -Wikipedia-API -win_unicode_console -win32-setctime -woops -wordcloud -wrapt -xarray -xarray-einstats -xattr -xgboost -xhtml2pdf -xlrd -XlsxWriter -xxhash -xyzservices -y-py -yarg -yarl -yaspin -ydata-profiling -yg.lockfile -ypy-websocket -zc.lockfile -zict -zipfile36 -zipp -zope.interface diff --git 
a/environment_configs/package_lists/allowlist-full-r-cran-tier3.list b/environment_configs/package_lists/allowlist-full-r-cran-tier3.list deleted file mode 100644 index 4f0ccbacc9..0000000000 --- a/environment_configs/package_lists/allowlist-full-r-cran-tier3.list +++ /dev/null @@ -1,579 +0,0 @@ -abind -acepack -actuar -anytime -arrow -askpass -assertthat -backports -base64 -base64enc -bayesplot -bibtex -biganalytics -bigD -biglm -bigmemory -bigmemory.sri -bigtabulate -bindr -bindrcpp -BiocManager -bit -bit64 -bitops -blastula -blob -boot -BradleyTerry2 -brew -brglm -brio -broom -bslib -ca -cachem -callr -car -carData -caret -cartogram -caTools -cellranger -checkmate -chron -Ckmeans.1d.dp -class -classInt -cli -clipr -clisymbols -clock -cluster -coda -codetools -colorspace -colourpicker -colourvalues -commonmark -conflicted -conquer -covr -cpp11 -crayon -credentials -crosstalk -crul -csv -csvy -curl -CVST -data.table -dataCompareR -DataExplorer -datamods -DBI -dbplyr -ddalpha -debugme -deldir -DEoptimR -desc -devtools -diagram -DiagrammeR -DiagrammeRsvg -dials -DiceDesign -dichromat -diffobj -digest -dimRed -distributional -doMC -doParallel -dotCall64 -downlit -downloader -dplyr -DRR -DT -dtplyr -dygraphs -e1071 -ellipsis -EQL -esquisse -evaluate -expint -expm -extraDistr -extrafont -extrafontdb -fansi -farver -fastmap -fastmatch -feather -fields -filehash -float -fontawesome -fontBitstreamVera -fontLiberation -fontquiver -forcats -foreach -foreign -formatR -Formula -fs -furrr -futile.logger -futile.options -future -future.apply -gargle -gbRd -gdata -gdtools -generics -geojson -geojsonio -geojsonsf -geometries -geometry -geosphere -gert -getPass -gfonts -ggforce -ggmap -ggplot2 -ggrepel -ggridges -ggthemes -ggvis -gh -git2r -gitcreds -glmnet -globals -glue -gmodels -gmp -gnm -googledrive -googlePolylines -googlesheets4 -googleVis -gower -GPfit -gplots -gridBase -gridExtra -gridSVG -gsubfn -gt -gtable -gtools -hardhat -haven -here -hexbin -hflights -highlight -highr -Hmisc -hms -hrbrthemes -HSAUR2 -htmlTable -htmltools -htmlwidgets -httpcode -httpuv -httr -httr2 -hunspell -ICS -ids -igraph -infer -influenceR -ini -inline -ipred -IRdisplay -IRkernel -irlba -isoband -ISOcodes -iterators -itertools -janeaustenr -janitor -jpeg -jqr -jquerylib -jsonify -jsonlite -juicyjuice -kdensity -kernlab -KernSmooth -knitr -labeling -labelVector -Lahman -lambda.r -later -lattice -latticeExtra -lava -lazyeval -lazyWeave -leafem -leaflet -leaflet.providers -leafpop -leafsync -leaps -LearnBayes -lgr -lhs -lifecycle -linprog -listenv -lme4 -lmtest -log4r -logitnorm -loo -lpSolve -lubridate -lumberjack -lwgeom -magic -magrittr -mailR -manipulateWidget -mapdeck -mapproj -maps -mapview -markdown -MASS -mathjaxr -Matrix -MatrixExtra -MatrixModels -matrixStats -mcmc -MCMCpack -memoise -messaging -Metrics -mgcv -mime -miniUI -minqa -mitools -mlapi -mlbench -MLmetrics -mlr3 -mlr3measures -mlr3misc -mnormt -modeldata -ModelMetrics -modelr -MSSQL -multcomp -munsell -MVA -mvtnorm -nakagami -ncdf4 -ncmeta -network -networkD3 -nlme -nloptr -NMF -nnet -nor1mix -numDeriv -odbc -openssl -OpenStreetMap -openxlsx -packcircles -packrat -palmerpenguins -paradox -parallelly -parsnip -patchwork -pbdMPI -pbdZMQ -pbkrtest -pcaPP -permute -phosphoricons -pillar -pixiedust -pkgbuild -pkgconfig -pkgdown -pkgload -pkgmaker -PKI -plotly -pls -plumber -plyr -png -pointblank -polyclip -polycor -posterior -praise -prettyunits -pROC -processx -procmaps -prodlim -profileModel -profvis -progress -progressr -promises -prophet -proto -protolite 
-proxy -proxyC -PRROC -ps -psych -psychotools -purrr -quadprog -quanteda -quantmod -quantreg -QuickJSR -qvcalc -R.cache -R.methodsS3 -R.oo -R.utils -R6 -ragg -randomForest -ranger -RApiDatetime -rapidjsonr -rappdirs -raster -rasterVis -rbibutils -rcmdcheck -RColorBrewer -Rcpp -RcppEigen -RcppParallel -RcppRoll -RcppTOML -RCurl -Rdpack -reactable -reactR -readODS -readr -readxl -recipes -registry -relimp -rematch -rematch2 -remotes -repr -reprex -reshape -reshape2 -reticulate -rex -rgexf -rgl -RgoogleMaps -RhpcBLASctl -rio -rJava -rjson -RJSONIO -rlang -rle -rlecuyer -rmapshaper -RMariaDB -rmarkdown -Rmpi -RMySQL -RNetCDF -rngtools -robustbase -robustX -ROCR -roxygen2 -rpart -RPostgres -RPostgreSQL -rprojroot -rrcov -rsample -rsconnect -rsparse -RSpectra -RSQLite -rstan -rstanarm -rstantools -rstudioapi -rsvg -Rttf2pt1 -RUnit -rversions -rvest -s2 -sandwich -sass -satellite -scales -selectr -servr -sessioninfo -settings -sf -sfheaders -sfsmisc -shape -shiny -shinybusy -shinyjs -shinystan -shinythemes -shinyWidgets -slam -slider -sm -sna -snakecase -SnowballC -sodium -sourcetools -sp -spacyr -spam -SparseM -spData -spdep -sqldf -SQUAREM -StanHeaders -stars -statmod -statnet.common -stopwords -stringi -stringr -styler -survey -survival -svglite -svgPanZoom -swagger -sys -systemfonts -tensorA -terra -testthat -text2vec -textshaping -TH.data -threejs -tibble -tidymodels -tidyposterior -tidypredict -tidyr -tidyselect -tidytext -tidyverse -tidyxl -tikzDevice -timechange -timeDate -timeSeries -tinytex -tis -tmap -tmaptools -tmvnsim -tokenizers -triebeard -tsbox -tseries -TTR -ttutils -tune -tweenr -tzdb -udunits2 -units -univariateML -urlchecker -urltools -usethis -utf8 -uuid -V8 -validate -vcd -vctrs -vegalite -vegan -viridis -viridisLite -visNetwork -vroom -waldo -warp -webshot -webutils -whisker -widgetframe -winch -withr -wk -wordcloud -workflows -workflowsets -writexl -xfun -xgboost -XLConnect -xlsx -xlsxjars -XML -xml2 -xopen -xtable -xts -yaml -yardstick -zeallot -zip -zoo diff --git a/environment_configs/shm_blue_core_config.json b/environment_configs/shm_blue_core_config.json deleted file mode 100644 index 236d7bf9cb..0000000000 --- a/environment_configs/shm_blue_core_config.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "name": "Turing Data Safe Haven (Blue)", - "shmId": "blue", - "domain": "blue.develop.turingsafehaven.ac.uk", - "timezone": "Europe/London", - "azure": { - "activeDirectoryTenantId": "1d93a235-0c16-4f39-adff-a550719bdb49", - "adminGroupName": "Safe Haven Test Admins", - "location": "uksouth", - "subscriptionName": "Data Safe Haven Development" - }, - "organisation": { - "name": "The Alan Turing Institute", - "townCity": "London", - "stateCountyRegion": "London", - "countryCode": "GB", - "contactEmail": "safehavendevs@turing.ac.uk" - }, - "dnsRecords": { - "subscriptionName": "[Prod] Safe Haven Domains", - "resourceGroupName": "RG_SHM_DNS_TEST" - }, - "vmImages": { - "subscriptionName": "[Prod] Safe Haven VM Images" - }, - "docker": { - "account": "youraccountname", - "password": "dckr_pat_0o0o0o0o0o0o0o0o0o0o0o0o0o0" - } -} diff --git a/environment_configs/shm_green_core_config.json b/environment_configs/shm_green_core_config.json deleted file mode 100644 index 182e8e5550..0000000000 --- a/environment_configs/shm_green_core_config.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "name": "Turing Data Safe Haven (Green)", - "shmId": "green", - "domain": "green.develop.turingsafehaven.ac.uk", - "timezone": "Europe/London", - "azure": { - "activeDirectoryTenantId": 
"cb94a6f6-ef7a-42ab-bcad-4f0b887cfd3e", - "adminGroupName": "Safe Haven Test Admins", - "location": "uksouth", - "subscriptionName": "Data Safe Haven Development" - }, - "organisation": { - "name": "The Alan Turing Institute", - "townCity": "London", - "stateCountyRegion": "London", - "countryCode": "GB", - "contactEmail": "safehavendevs@turing.ac.uk" - }, - "dnsRecords": { - "subscriptionName": "[Prod] Safe Haven Domains", - "resourceGroupName": "RG_SHM_DNS_TEST" - }, - "repositoryType": { - "tier2": "proxy", - "tier3": "mirror" - }, - "vmImages": { - "subscriptionName": "[Prod] Safe Haven VM Images" - }, - "docker": { - "account": "youraccountname", - "password": "dckr_pat_0o0o0o0o0o0o0o0o0o0o0o0o0o0" - } -} diff --git a/environment_configs/sre_bluet1guac_core_config.json b/environment_configs/sre_bluet1guac_core_config.json deleted file mode 100644 index eefbbb439e..0000000000 --- a/environment_configs/sre_bluet1guac_core_config.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "sreId": "t1guac", - "tier": "1", - "shmId": "blue", - "subscriptionName": "Data Safe Haven Development", - "ipPrefix": "10.151.0", - "inboundAccessFrom": "Internet", - "outboundInternetAccess": "default", - "computeVmImage": { - "type": "Ubuntu", - "version": "20.04.2024032600" - }, - "dataAdminIpAddresses": ["193.60.220.253"], - "databases": ["MSSQL", "PostgreSQL"] -} diff --git a/environment_configs/sre_bluet2guac_core_config.json b/environment_configs/sre_bluet2guac_core_config.json deleted file mode 100644 index ec431eef09..0000000000 --- a/environment_configs/sre_bluet2guac_core_config.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "sreId": "t2guac", - "tier": "2", - "shmId": "blue", - "subscriptionName": "Data Safe Haven Development", - "ipPrefix": "10.152.0", - "inboundAccessFrom": ["193.60.220.253"], - "outboundInternetAccess": "default", - "computeVmImage": { - "type": "Ubuntu", - "version": "20.04.2024032600" - }, - "dataAdminIpAddresses": ["193.60.220.253"], - "databases": ["MSSQL", "PostgreSQL"] -} diff --git a/environment_configs/sre_bluet3guac_core_config.json b/environment_configs/sre_bluet3guac_core_config.json deleted file mode 100644 index 8bdf2a586c..0000000000 --- a/environment_configs/sre_bluet3guac_core_config.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "sreId": "t3guac", - "tier": "3", - "shmId": "blue", - "subscriptionName": "Data Safe Haven Development", - "ipPrefix": "10.153.0", - "inboundAccessFrom": ["193.60.220.240"], - "outboundInternetAccess": "default", - "computeVmImage": { - "type": "Ubuntu", - "version": "20.04.2024032600" - }, - "dataAdminIpAddresses": ["193.60.220.240"], - "databases": ["MSSQL", "PostgreSQL"] -} diff --git a/environment_configs/sre_greent1guac_core_config.json b/environment_configs/sre_greent1guac_core_config.json deleted file mode 100644 index bf6ac297fb..0000000000 --- a/environment_configs/sre_greent1guac_core_config.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "sreId": "t1guac", - "tier": "1", - "shmId": "green", - "subscriptionName": "Data Safe Haven Development", - "ipPrefix": "10.151.0", - "inboundAccessFrom": "Internet", - "outboundInternetAccess": "default", - "computeVmImage": { - "type": "Ubuntu", - "version": "20.04.2024032600" - }, - "dataAdminIpAddresses": ["193.60.220.253"], - "databases": ["MSSQL", "PostgreSQL"] -} diff --git a/environment_configs/sre_greent2guac_core_config.json b/environment_configs/sre_greent2guac_core_config.json deleted file mode 100644 index 7f44fb8ce4..0000000000 --- a/environment_configs/sre_greent2guac_core_config.json +++ /dev/null @@ 
-1,15 +0,0 @@ -{ - "sreId": "t2guac", - "tier": "2", - "shmId": "green", - "subscriptionName": "Data Safe Haven Development", - "ipPrefix": "10.152.0", - "inboundAccessFrom": ["193.60.220.253"], - "outboundInternetAccess": "default", - "computeVmImage": { - "type": "Ubuntu", - "version": "20.04.2024032600" - }, - "dataAdminIpAddresses": ["193.60.220.253"], - "databases": ["MSSQL", "PostgreSQL"] -} diff --git a/environment_configs/sre_greent3guac_core_config.json b/environment_configs/sre_greent3guac_core_config.json deleted file mode 100644 index 4702a9d411..0000000000 --- a/environment_configs/sre_greent3guac_core_config.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "sreId": "t3guac", - "tier": "3", - "shmId": "green", - "subscriptionName": "Data Safe Haven Development", - "ipPrefix": "10.153.0", - "inboundAccessFrom": ["193.60.220.240"], - "outboundInternetAccess": "default", - "computeVmImage": { - "type": "Ubuntu", - "version": "20.04.2024032600" - }, - "dataAdminIpAddresses": ["193.60.220.240"], - "databases": ["MSSQL", "PostgreSQL"] -} diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000..115fa5f42f --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,234 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "data-safe-haven" +dynamic = ["version"] +description = "An open-source framework for creating secure environments to analyse sensitive data." +authors = [ + { name = "Data Safe Haven development team", email = "safehavendevs@turing.ac.uk" }, +] +requires-python = "==3.12.*" +license = { text = "BSD-3-Clause" } +dependencies = [ + "appdirs>=1.4", + "azure-core>=1.26", + "azure-identity>=1.16.1", + "azure-keyvault-certificates>=4.6", + "azure-keyvault-keys>=4.6", + "azure-keyvault-secrets>=4.6", + "azure-mgmt-automation>=1.0", + "azure-mgmt-compute>=30.3", + "azure-mgmt-containerinstance>=10.1", + "azure-mgmt-dns>=8.0", + "azure-mgmt-keyvault>=10.3", + "azure-mgmt-msi>=7.0", + "azure-mgmt-network>=25.0", + "azure-mgmt-rdbms>=10.1", + "azure-mgmt-resource>=23.0", + "azure-mgmt-storage>=21.1", + "azure-storage-blob>=12.15", + "azure-storage-file-datalake>=12.10", + "azure-storage-file-share>=12.10", + "chevron>=0.14", + "cryptography>=42.0", + "fqdn>=1.5", + "msal>=1.21", + "psycopg>=3.1", + "pulumi>=3.80", + "pulumi-azure-native>=2.49.1", + "pulumi-random>=4.14", + "pulumi-tls>=5.0.3", + "pydantic>=2.4", + "pyjwt>=2.8", + "pytz>=2023.3", + "PyYAML>=6.0", + "rich>=13.4", + "simple-acme-dns>=3.0", + "typer>=0.9", + "websocket-client>=1.5", +] + +[project.scripts] +dsh = "data_safe_haven.commands.cli:main" + +[tool.black] +target-version = ["py312"] + +[tool.coverage.paths] +source = ["data_safe_haven/"] + +[tool.coverage.run] +relative_files = true +omit= [ + "tests/*", + "data_safe_haven/resources/*", +] + +[tool.hatch.envs.default] +pre-install-commands = ["pip install -r requirements.txt"] + +[tool.hatch.envs.docs] +detached = true +dependencies = [ + "emoji>=2.10.0", + "myst-parser>=2.0.0", + "pydata-sphinx-theme>=0.15.0", + "Sphinx>=7.3.0", + "sphinx-togglebutton>0.3.0", +] +pre-install-commands = ["pip install -r docs/requirements.txt"] + +[tool.hatch.envs.docs.scripts] +build = [ + # Treat warnings as errors + "make -C docs html SPHINXOPTS='-W'" +] + +[tool.hatch.envs.lint] +detached = true +dependencies = [ + "ansible>=10.2.0", + "ansible-dev-tools>=24.7.1", + "black>=24.1.0", + "mypy>=1.0.0", + "pydantic>=2.4", + "ruff>=0.3.4", + "types-appdirs>=1.4.3.5", + "types-chevron>=0.14.2.5", + "types-pytz>=2023.3.0.0", 
+ "types-PyYAML>=6.0.12.11", + "types-requests>=2.31.0.2", +] + +[tool.hatch.envs.lint.scripts] +all = [ + "style", + "typing", + "ansible", +] +ansible = "ansible-lint {args:data_safe_haven/resources/workspace/ansible/}" +fmt = [ + "black {args:data_safe_haven tests}", + "ruff check --fix {args:data_safe_haven tests}", + "style", +] +style = [ + "ruff check {args:data_safe_haven tests}", + "black --check --diff {args:data_safe_haven tests}", +] +typing = "mypy {args:data_safe_haven}" + +[tool.hatch.envs.test] +dependencies = [ + "coverage>=7.5.1", + "freezegun>=1.5", + "pytest>=8.1", + "pytest-mock>=3.14", + "requests-mock>=1.12", +] +pre-install-commands = ["pip install -r requirements.txt"] + +[tool.hatch.envs.test.scripts] +test = "coverage run -m pytest {args: tests}" +test-report = "coverage report {args:}" +test-coverage = ["test", "test-report"] + +[tool.hatch.version] +path = "data_safe_haven/version.py" + +[tool.mypy] +disallow_subclassing_any = false # allow subclassing of types from third-party libraries +files = "data_safe_haven" # run mypy over this directory +mypy_path = "typings" # use this directory for stubs +strict = true # enable all optional error checking flags + +[[tool.mypy.overrides]] +module = [ + "acme.*", + "azure.core.*", + "azure.identity.*", + "azure.keyvault.*", + "azure.mgmt.*", + "azure.storage.*", + "cryptography.*", + "dns.*", + "jwt.*", + "msal.*", + "numpy.*", + "pandas.*", + "psycopg.*", + "pulumi.*", + "pulumi_azure_native.*", + "pulumi_random.*", + "pulumi_tls.*", + "pymssql.*", + "rich.*", + "simple_acme_dns.*", + "sklearn.*", + "websocket.*", +] +ignore_missing_imports = true + +[tool.pytest.ini_options] +addopts = [ + "-vvv", + "--import-mode=importlib", + "--disable-warnings", + "--ignore=data_safe_haven/resources/*", +] + +[tool.ruff.lint] +select = [ + # See https://beta.ruff.rs/docs/rules/ + "A", # flake8-builtins + "ARG", # flake8-unused-arguments + "B", # flake8-bugbear + "C", # complexity, mcabe and flake8-comprehensions + "DTZ", # flake8-datetimez + "E", # pycodestyle errors + "EM", # flake8-errmsg + "F", # pyflakes + "FBT", # flake8-boolean-trap + "I", # isort + "ICN", # flake8-import-conventions + "ISC", # flake8-implicit-str-concat + "N", # pep8-naming + "PLC", # pylint convention + "PLE", # pylint error + "PLR", # pylint refactor + "PLW", # pylint warning + "Q", # flake8-quotes + "RUF", # ruff rules + "S", # flake8-bandits + "T", # flake8-debugger and flake8-print + "TID", # flake8-tidy-imports + "UP", # pyupgrade + "W", # pycodestyle warnings + "YTT", # flake8-2020 +] +ignore = [ + "E501", # ignore line length + "S106", # ignore check for possible passwords + "S603", # allow subprocess with shell=False, this is lower severity than those with shell=True + "C901", # ignore complex-structure + "PLR0912", # ignore too-many-branches + "PLR0913", # ignore too-many-arguments + "PLR0915", # ignore too-many-statements +] + +[tool.ruff.lint.flake8-tidy-imports] +ban-relative-imports = "parents" + +[tool.ruff.lint.isort] +combine-as-imports = true +known-first-party = ["data_safe_haven"] + +[tool.ruff.lint.per-file-ignores] +# Ignore the following for tests only +"tests/**/*" = [ + "PLR2004", # magic values + "S101", # assertions + "TID252", # relative imports +] diff --git a/requirements-constraints.txt b/requirements-constraints.txt new file mode 100644 index 0000000000..ebcbb8ff2e --- /dev/null +++ b/requirements-constraints.txt @@ -0,0 +1,29 @@ +# Use this file to specify constraints on packages that we do not directly depend on +# It will 
be used by pip-compile when solving the environment, but only if the package is required otherwise + +# Exclude azure-identity < 1.16.1 (from https://github.com/alan-turing-institute/data-safe-haven/security/dependabot/17) +azure-identity >=1.16.1 + +# Exclude cryptography < 42.0.0 (from https://github.com/alan-turing-institute/data-safe-haven/security/dependabot/8) +# Exclude cryptography < 42.0.2 (from https://github.com/alan-turing-institute/data-safe-haven/security/dependabot/9) +# Exclude cryptography >= 38.0.0, < 42.0.4 (from https://github.com/alan-turing-institute/data-safe-haven/security/dependabot/10) +cryptography >=42.0.4 + +# Exclude dnspython < 2.6.1 (from https://github.com/alan-turing-institute/data-safe-haven/security/dependabot/13) +dnspython >=2.6.1 + +# Exclude idna < 3.7 (from https://github.com/alan-turing-institute/data-safe-haven/security/dependabot/11) +idna >=3.7 + +# Exclude jinja2 < 3.1.3 (from https://github.com/alan-turing-institute/data-safe-haven/security/dependabot/7) +# Exclude jinja2 < 3.1.4 (from https://github.com/alan-turing-institute/data-safe-haven/security/dependabot/14) +jinja2 >=3.1.4 + + +# Exclude requests >= 2.3.0, < 2.31.0 (from https://github.com/alan-turing-institute/data-safe-haven/security/dependabot/3) +# Exclude requests < 2.32.0 (from https://github.com/alan-turing-institute/data-safe-haven/security/dependabot/15) +requests >=2.32.0 + +# Exclude urllib3 >= 2.0.0, < 2.0.6 (from https://github.com/alan-turing-institute/data-safe-haven/security/dependabot/5) +# Exclude urllib3 >= 2.0.0, < 2.2.2 (from https://github.com/alan-turing-institute/data-safe-haven/security/dependabot/18) +urllib3 !=2.0.*,!=2.1.*,!=2.2.0,!=2.2.1 diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000..f0a3084a8c --- /dev/null +++ b/requirements.txt @@ -0,0 +1,273 @@ +# +# This file is autogenerated by pip-compile with Python 3.12 +# by the following command: +# +# pip-compile --constraint=requirements-constraints.txt --output-file=requirements.txt pyproject.toml +# +acme==2.10.0 + # via simple-acme-dns +annotated-types==0.7.0 + # via pydantic +appdirs==1.4.4 + # via data-safe-haven (pyproject.toml) +arpeggio==2.0.2 + # via parver +attrs==24.1.0 + # via parver +azure-common==1.1.28 + # via + # azure-mgmt-automation + # azure-mgmt-compute + # azure-mgmt-containerinstance + # azure-mgmt-dns + # azure-mgmt-keyvault + # azure-mgmt-msi + # azure-mgmt-network + # azure-mgmt-rdbms + # azure-mgmt-resource + # azure-mgmt-storage +azure-core==1.30.2 + # via + # azure-identity + # azure-keyvault-certificates + # azure-keyvault-keys + # azure-keyvault-secrets + # azure-mgmt-core + # azure-storage-blob + # azure-storage-file-datalake + # azure-storage-file-share + # data-safe-haven (pyproject.toml) + # msrest +azure-identity==1.17.1 + # via + # -c requirements-constraints.txt + # data-safe-haven (pyproject.toml) +azure-keyvault-certificates==4.8.0 + # via data-safe-haven (pyproject.toml) +azure-keyvault-keys==4.9.0 + # via data-safe-haven (pyproject.toml) +azure-keyvault-secrets==4.8.0 + # via data-safe-haven (pyproject.toml) +azure-mgmt-automation==1.0.0 + # via data-safe-haven (pyproject.toml) +azure-mgmt-compute==32.0.0 + # via data-safe-haven (pyproject.toml) +azure-mgmt-containerinstance==10.1.0 + # via data-safe-haven (pyproject.toml) +azure-mgmt-core==1.4.0 + # via + # azure-mgmt-automation + # azure-mgmt-compute + # azure-mgmt-containerinstance + # azure-mgmt-dns + # azure-mgmt-keyvault + # azure-mgmt-msi + # azure-mgmt-network + #
azure-mgmt-rdbms + # azure-mgmt-resource + # azure-mgmt-storage +azure-mgmt-dns==8.1.0 + # via data-safe-haven (pyproject.toml) +azure-mgmt-keyvault==10.3.1 + # via data-safe-haven (pyproject.toml) +azure-mgmt-msi==7.0.0 + # via data-safe-haven (pyproject.toml) +azure-mgmt-network==26.0.0 + # via data-safe-haven (pyproject.toml) +azure-mgmt-rdbms==10.1.0 + # via data-safe-haven (pyproject.toml) +azure-mgmt-resource==23.1.1 + # via data-safe-haven (pyproject.toml) +azure-mgmt-storage==21.2.1 + # via data-safe-haven (pyproject.toml) +azure-storage-blob==12.21.0 + # via + # azure-storage-file-datalake + # data-safe-haven (pyproject.toml) +azure-storage-file-datalake==12.16.0 + # via data-safe-haven (pyproject.toml) +azure-storage-file-share==12.17.0 + # via data-safe-haven (pyproject.toml) +certifi==2024.7.4 + # via + # msrest + # requests +cffi==1.16.0 + # via cryptography +charset-normalizer==3.3.2 + # via requests +chevron==0.14.0 + # via data-safe-haven (pyproject.toml) +click==8.1.7 + # via typer +cryptography==43.0.0 + # via + # -c requirements-constraints.txt + # acme + # azure-identity + # azure-keyvault-keys + # azure-storage-blob + # azure-storage-file-share + # data-safe-haven (pyproject.toml) + # josepy + # msal + # pyjwt + # pyopenssl +dill==0.3.8 + # via pulumi +dnspython==2.6.1 + # via + # -c requirements-constraints.txt + # simple-acme-dns +fqdn==1.5.1 + # via data-safe-haven (pyproject.toml) +grpcio==1.60.1 + # via pulumi +idna==3.7 + # via + # -c requirements-constraints.txt + # requests +isodate==0.6.1 + # via + # azure-keyvault-certificates + # azure-keyvault-keys + # azure-keyvault-secrets + # azure-mgmt-compute + # azure-mgmt-containerinstance + # azure-mgmt-dns + # azure-mgmt-keyvault + # azure-mgmt-network + # azure-mgmt-resource + # azure-mgmt-storage + # azure-storage-blob + # azure-storage-file-datalake + # azure-storage-file-share + # msrest +josepy==1.14.0 + # via acme +markdown-it-py==3.0.0 + # via rich +mdurl==0.1.2 + # via markdown-it-py +msal==1.30.0 + # via + # azure-identity + # data-safe-haven (pyproject.toml) + # msal-extensions +msal-extensions==1.2.0 + # via azure-identity +msrest==0.7.1 + # via + # azure-mgmt-automation + # azure-mgmt-msi + # azure-mgmt-rdbms +oauthlib==3.2.2 + # via requests-oauthlib +parver==0.5 + # via + # pulumi-azure-native + # pulumi-random + # pulumi-tls +portalocker==2.10.1 + # via msal-extensions +protobuf==4.25.4 + # via pulumi +psycopg==3.2.1 + # via data-safe-haven (pyproject.toml) +pulumi==3.127.0 + # via + # data-safe-haven (pyproject.toml) + # pulumi-azure-native + # pulumi-random + # pulumi-tls +pulumi-azure-native==2.52.0 + # via data-safe-haven (pyproject.toml) +pulumi-random==4.16.3 + # via data-safe-haven (pyproject.toml) +pulumi-tls==5.0.4 + # via data-safe-haven (pyproject.toml) +pycparser==2.22 + # via cffi +pydantic==2.8.2 + # via data-safe-haven (pyproject.toml) +pydantic-core==2.20.1 + # via pydantic +pygments==2.18.0 + # via rich +pyjwt[crypto]==2.9.0 + # via + # data-safe-haven (pyproject.toml) + # msal +pyopenssl==24.2.1 + # via + # acme + # josepy +pyrfc3339==1.1 + # via acme +pytz==2024.1 + # via + # acme + # data-safe-haven (pyproject.toml) + # pyrfc3339 +pyyaml==6.0.1 + # via + # data-safe-haven (pyproject.toml) + # pulumi +requests==2.32.3 + # via + # -c requirements-constraints.txt + # acme + # azure-core + # msal + # msrest + # requests-oauthlib +requests-oauthlib==2.0.0 + # via msrest +rich==13.7.1 + # via + # data-safe-haven (pyproject.toml) + # typer +semver==2.13.0 + # via + # pulumi + # 
pulumi-azure-native + # pulumi-random + # pulumi-tls +shellingham==1.5.4 + # via typer +simple-acme-dns==3.1.0 + # via data-safe-haven (pyproject.toml) +six==1.16.0 + # via + # azure-core + # isodate + # pulumi +typer==0.12.3 + # via data-safe-haven (pyproject.toml) +typing-extensions==4.12.2 + # via + # azure-core + # azure-identity + # azure-keyvault-certificates + # azure-keyvault-keys + # azure-keyvault-secrets + # azure-mgmt-compute + # azure-mgmt-keyvault + # azure-storage-blob + # azure-storage-file-datalake + # azure-storage-file-share + # psycopg + # pydantic + # pydantic-core + # typer +urllib3==2.2.2 + # via + # -c requirements-constraints.txt + # requests +validators==0.28.3 + # via simple-acme-dns +websocket-client==1.8.0 + # via data-safe-haven (pyproject.toml) + +# The following packages are considered to be unsafe in a requirements file: +# setuptools diff --git a/tests/AutoFormat_Powershell.ps1 b/tests/AutoFormat_Powershell.ps1 deleted file mode 100644 index 99bd7f4aec..0000000000 --- a/tests/AutoFormat_Powershell.ps1 +++ /dev/null @@ -1,51 +0,0 @@ -param( - [Parameter(Mandatory = $false, HelpMessage = "Restrict to only run over files in the given path")] - [string]$TargetPath, - [Parameter(Mandatory = $false, HelpMessage = "Only alter line endings and file encodings")] - [switch]$EncodingOnly -) - -Import-Module PSScriptAnalyzer -ErrorAction Stop -Import-Module $PSScriptRoot/../deployment/common/Logging -ErrorAction Stop - - -# Set the root path which we will format -# -------------------------------------- -$CodeRootPath = $TargetPath ? $TargetPath : (Join-Path -Path (Get-Item $PSScriptRoot).Parent -ChildPath "deployment") - - -# Formatter settings -# ------------------ -$FileExtensions = @("*.ps1", "*.psm1", "*.psd1") -$SettingsPath = Join-Path -Path (Get-Item $PSScriptRoot).Parent -ChildPath ".PSScriptFormatterSettings.psd1" -$PowershellFilePaths = @(Get-ChildItem -Path $CodeRootPath -Include $FileExtensions -Recurse | Select-Object -ExpandProperty FullName) - - -# Run Invoke-Formatter on all files -# --------------------------------- -foreach ($PowershellFilePath in $PowershellFilePaths) { - $Unformatted = Get-Content -Path $PowershellFilePath -Raw - - # Detect the end-of-line marker and strip empty lines - $EOLMarker = $Unformatted -match "\r\n$" ? "`r`n" : "`n" - $Formatted = $Unformatted -replace "(?s)$EOLMarker\s*$" - - # Call formatter - if (-not $EncodingOnly) { - $Formatted = Invoke-Formatter -ScriptDefinition $Formatted -Settings $SettingsPath - } - - # Set correct line endings and correct encoding. - # Omitting the Byte Order Mark gives better cross-platform compatibility but Windows scripts need it - # We use Set-Content instead of Out-File so that we can write line-endings that are not the platform default - $Encoding = $EOLMarker -eq "`r`n" ? "UTF8BOM" : "UTF8NoBOM" - $Formatted.Replace("`r`n", "`r`r").Replace("`n", "`r`r").Replace("`r`r", $EOLMarker) | Set-Content -Path $PowershellFilePath -Encoding $Encoding - - # Check whether any changes were made to the file - $Formatted = Get-Content -Path $PowershellFilePath -Raw - if ($Formatted -ne $Unformatted) { - Add-LogMessage -Level Info "Formatting ${PowershellFilePath}..." 
- } else { - Add-LogMessage -Level Info "${PowershellFilePath} is already formatted" - } -} diff --git a/tests/Compare_Deployments.ps1 b/tests/Compare_Deployments.ps1 deleted file mode 100644 index 94d87fffd7..0000000000 --- a/tests/Compare_Deployments.ps1 +++ /dev/null @@ -1,335 +0,0 @@ -# Parameter sets in Powershell are a bit counter-intuitive. See here (https://docs.microsoft.com/en-us/powershell/scripting/developer/cmdlet/cmdlet-parameter-sets?view=powershell-7) for details -param( - [Parameter(Mandatory = $true, HelpMessage = "Name of the test (proposed) subscription")] - [string]$Subscription, - [Parameter(ParameterSetName = "BenchmarkSubscription", Mandatory = $true, HelpMessage = "Name of the benchmark subscription to compare against")] - [string]$BenchmarkSubscription, - [Parameter(ParameterSetName = "BenchmarkConfig", Mandatory = $true, HelpMessage = "Path to the benchmark config to compare against")] - [string]$BenchmarkConfig, - [Parameter(Mandatory = $false, HelpMessage = "Print verbose logging messages")] - [switch]$VerboseLogging = $false -) - -# Install required modules -if (-not $(Get-Module -ListAvailable -Name Az)) { Install-Package Az -Force } -if (-not $(Get-Module -ListAvailable -Name Communary.PASM)) { Install-Package Communary.PASM -Force } - -# Import modules -Import-Module Az.Accounts -ErrorAction Stop -Import-Module Az.Compute -ErrorAction Stop -Import-Module Az.Network -ErrorAction Stop -Import-Module Communary.PASM -ErrorAction Stop -Import-Module $PSScriptRoot/../deployment/common/Logging -Force -ErrorAction Stop - -function Select-ClosestMatch { - param ( - [Parameter()] - [ValidateNotNullOrEmpty()] - [string]$Value, - [Parameter()] - [ValidateNotNullOrEmpty()] - [System.Array]$Array - ) - $Array | Sort-Object @{Expression = { Get-PasmScore -String1 $Value -String2 $_ -Algorithm "LevenshteinDistance" }; Ascending = $false } | Select-Object -First 1 -} - -# Compare two NSG rule sets -# Match parameter-by-parameter -# -------------------------------------------- -function Compare-NSGRules { - param ( - [Parameter()] - [System.Array]$BenchmarkRules, - [Parameter()] - [System.Array]$TestRules - ) - $nMatched = 0 - $unmatched = @() - foreach ($benchmarkRule in $BenchmarkRules) { - $lowestDifference = [double]::PositiveInfinity - $closestMatchingRule = $null - # Iterate over TestRules checking for an identical match by checking how many of the rule parameters differ - # If an exact match is found then increment the counter, otherwise log the rule and the closest match - foreach ($testRule in $TestRules) { - $difference = 0 - if ($benchmarkRule.Protocol -ne $testRule.Protocol) { $difference += 1 } - if ([string]($benchmarkRule.SourcePortRange) -ne [string]($testRule.SourcePortRange)) { $difference += 1 } - if ([string]($benchmarkRule.DestinationPortRange) -ne [string]($testRule.DestinationPortRange)) { $difference += 1 } - if ([string]($benchmarkRule.SourceAddressPrefix) -ne [string]($testRule.SourceAddressPrefix)) { $difference += 1 } - if ([string]($benchmarkRule.DestinationAddressPrefix) -ne [string]($testRule.DestinationAddressPrefix)) { $difference += 1 } - if ($benchmarkRule.Access -ne $testRule.Access) { $difference += 1 } - if ($benchmarkRule.Priority -ne $testRule.Priority) { $difference += 1 } - if ($benchmarkRule.Direction -ne $testRule.Direction) { $difference += 1 } - if ($difference -lt $lowestDifference) { - $lowestDifference = $difference - $closestMatchingRule = $testRule - } - if ($difference -eq 0) { break } - } - - if ($lowestDifference -eq 0) { 
- $nMatched += 1 - if ($VerboseLogging) { Add-LogMessage -Level Info "Found matching rule for $($benchmarkRule.Name)" } - } else { - Add-LogMessage -Level Error "Could not find an identical rule for $($benchmarkRule.Name)" - $unmatched += $benchmarkRule.Name - $benchmarkRule | Out-String - Add-LogMessage -Level Info "Closest match was:" - $closestMatchingRule | Out-String - } - } - - $nTotal = $nMatched + $unmatched.Count - if ($nMatched -eq $nTotal) { - Add-LogMessage -Level Success "Matched $nMatched/$nTotal rules" - } else { - Add-LogMessage -Level Failure "Matched $nMatched/$nTotal rules" - } -} - - -function Test-OutboundConnection { - param ( - [Parameter()] - [ValidateNotNullOrEmpty()] - [Microsoft.Azure.Commands.Compute.Models.PSVirtualMachine]$VM, - [Parameter()] - [ValidateNotNullOrEmpty()] - [string]$DestinationAddress, - [Parameter()] - [ValidateNotNullOrEmpty()] - [string]$DestinationPort - ) - # Get the network watcher, creating a new one if required - $networkWatcher = Get-AzNetworkWatcher | Where-Object { $_.Location -eq $VM.Location } - if (-not $networkWatcher) { - $networkWatcher = New-AzNetworkWatcher -Name "NetworkWatcher" -ResourceGroupName "NetworkWatcherRG" -Location $VM.Location - } - # Ensure that the VM has the extension installed (if we have permissions for this) - $networkWatcherExtension = Get-AzVMExtension -ResourceGroupName $VM.ResourceGroupName -VMName $VM.Name | Where-Object { ($_.Publisher -eq "Microsoft.Azure.NetworkWatcher") -and ($_.ProvisioningState -eq "Succeeded") } - if (-not $networkWatcherExtension) { - Add-LogMessage -Level Info "... registering the Azure NetworkWatcher extension on $($VM.Name). " - # Add the Windows extension - if ($VM.OSProfile.WindowsConfiguration) { - $null = Set-AzVMExtension -ResourceGroupName $VM.ResourceGroupName -VMName $VM.Name -Location $VM.Location -Name "AzureNetworkWatcherExtension" -Publisher "Microsoft.Azure.NetworkWatcher" -Type "NetworkWatcherAgentWindows" -TypeHandlerVersion "1.4" -ErrorVariable NotInstalled -ErrorAction SilentlyContinue - if ($NotInstalled) { - Add-LogMessage -Level Warning "Unable to register Windows network watcher extension for $($VM.Name)" - return "Unknown" - } - } - # Add the Linux extension - if ($VM.OSProfile.LinuxConfiguration) { - $null = Set-AzVMExtension -ResourceGroupName $VM.ResourceGroupName -VMName $VM.Name -Location $VM.Location -Name "AzureNetworkWatcherExtension" -Publisher "Microsoft.Azure.NetworkWatcher" -Type "NetworkWatcherAgentLinux" -TypeHandlerVersion "1.4" -ErrorVariable NotInstalled -ErrorAction SilentlyContinue - if ($NotInstalled) { - Add-LogMessage -Level Warning "Unable to register Linux network watcher extension for $($VM.Name)" - return "Unknown" - } - } - } - Add-LogMessage -Level Info "... 
testing connectivity on port $DestinationPort" - $networkCheck = Test-AzNetworkWatcherConnectivity -NetworkWatcher $networkWatcher -SourceId $VM.Id -DestinationAddress $DestinationAddress -DestinationPort $DestinationPort -ErrorVariable NotAvailable -ErrorAction SilentlyContinue - if ($NotAvailable) { - Add-LogMessage -Level Warning "Unable to test connection for $($VM.Name)" - return "Unknown" - } else { - return $networkCheck.ConnectionStatus - } -} - -function Convert-RuleToEffectiveRule { - param ( - [Parameter(Position = 0)][ValidateNotNullOrEmpty()] - [System.Object] $rule - ) - $effectiveRule = [Microsoft.Azure.Commands.Network.Models.PSEffectiveSecurityRule]::new() - $effectiveRule.Name = $rule.Name - $effectiveRule.Protocol = $rule.Protocol.Replace("*", "All") - # Source port range - $effectiveRule.SourcePortRange = New-Object System.Collections.Generic.List[string] - foreach ($port in $rule.SourcePortRange) { - # We do not explicitly deal with the case where the port is not an integer, a range or '*' - if ($port -eq "*") { $effectiveRule.SourcePortRange.Add("0-65535"); break } - elseif ($port.Contains("-")) { $effectiveRule.SourcePortRange.Add($port) } - else { $effectiveRule.SourcePortRange.Add("$port-$port") } - } - # Destination port range - $effectiveRule.DestinationPortRange = New-Object System.Collections.Generic.List[string] - foreach ($port in $rule.DestinationPortRange) { - # We do not explicitly deal with the case where the port is not an integer, a range or '*' - if ($port -eq "*") { $effectiveRule.DestinationPortRange.Add("0-65535"); break } - elseif ($port.Contains("-")) { $effectiveRule.DestinationPortRange.Add($port) } - else { $effectiveRule.DestinationPortRange.Add("$port-$port") } - } - # Source address prefix - $effectiveRule.SourceAddressPrefix = New-Object System.Collections.Generic.List[string] - foreach ($prefix in $rule.SourceAddressPrefix) { - if ($prefix -eq "0.0.0.0/0") { $effectiveRule.SourceAddressPrefix.Add("*"); break } - else { $effectiveRule.SourceAddressPrefix.Add($rule.SourceAddressPrefix) } - } - # Destination address prefix - $effectiveRule.DestinationAddressPrefix = New-Object System.Collections.Generic.List[string] - foreach ($prefix in $rule.DestinationAddressPrefix) { - if ($prefix -eq "0.0.0.0/0") { $effectiveRule.DestinationAddressPrefix.Add("*"); break } - else { $effectiveRule.DestinationAddressPrefix.Add($rule.DestinationAddressPrefix) } - } - $effectiveRule.Access = $rule.Access - $effectiveRule.Priority = $rule.Priority - $effectiveRule.Direction = $rule.Direction - return $effectiveRule -} - - -function Get-NSGRules { - param ( - [Parameter(Position = 0)][ValidateNotNullOrEmpty()] - [Microsoft.Azure.Commands.Compute.Models.PSVirtualMachine] $VM - ) - $effectiveNSG = Get-AzEffectiveNetworkSecurityGroup -NetworkInterfaceName ($VM.NetworkProfile.NetworkInterfaces.Id -Split '/')[-1] -ResourceGroupName $VM.ResourceGroupName -ErrorVariable NotAvailable -ErrorAction SilentlyContinue - if ($NotAvailable) { - # Not able to get effective rules so we'll construct them by hand - $rules = @() - # Get rules from NSG directly attached to the NIC - $nic = Get-AzNetworkInterface | Where-Object { $_.Id -eq $VM.NetworkProfile.NetworkInterfaces.Id } - $directNsgs = Get-AzNetworkSecurityGroup | Where-Object { $_.Id -eq $nic.NetworkSecurityGroup.Id } - $directNsgRules = @() - foreach ($directNsg in $directNsgs) { - $directNsgRules = $directNsgRules + $directNsg.SecurityRules + $directNsg.DefaultSecurityRules - } - # Get rules from NSG attached to the 
subnet - $subnetNsgs = Get-AzNetworkSecurityGroup | Where-Object { $_.Subnets.Id -eq $nic.IpConfigurations.Subnet.Id } - $subnetNsgRules = @() - foreach ($subnetNsg in $subnetNsgs) { - $subnetNsgRules = $subnetNsgRules + $subnetNsg.SecurityRules + $subnetNsg.DefaultSecurityRules - } - $effectiveRules = @() - if ($directNsgRules.Count -And $subnetNsgRules.Count) { - Add-LogMessage -Level Warning "Found both NSG rules from both the NIC and the subnet for $($VM.Name). Evaluation of effective rules may be incorrect!" - } - # Convert each PSSecurityRule into a PSEffectiveSecurityRule - foreach ($rule in ($directNsgRules + $subnetNsgRules)) { - $effectiveRules = $effectiveRules + $(Convert-RuleToEffectiveRule $rule) - } - return $effectiveRules - } else { - $effectiveRules = $effectiveNSG.EffectiveSecurityRules - # Sometimes the address prefix is retrieved as ("0.0.0.0/0", "0.0.0.0/0") rather than "*" (although these mean the same thing) - foreach ($effectiveRule in $effectiveRules) { - if ($effectiveRule.SourceAddressPrefix[0] -eq "0.0.0.0/0") { $effectiveRule.SourceAddressPrefix.Clear(); $effectiveRule.SourceAddressPrefix.Add("*") } - if ($effectiveRule.DestinationAddressPrefix[0] -eq "0.0.0.0/0") { $effectiveRule.DestinationAddressPrefix.Clear(); $effectiveRule.DestinationAddressPrefix.Add("*") } - } - return $effectiveRules - } -} - -# Get original context before switching subscription -# -------------------------------------------------- -$originalContext = Get-AzContext - - -# Load configuration from a benchmark subscription or config -# ---------------------------------------------------------- -if ($BenchmarkSubscription) { - $JsonConfig = [ordered]@{} - # Get VMs in current subscription - $null = Set-AzContext -SubscriptionId $BenchmarkSubscription -ErrorAction Stop - $benchmarkVMs = Get-AzVM | Where-Object { $_.Name -NotLike "*shm-deploy*" } - Add-LogMessage -Level Info "Found $($benchmarkVMs.Count) VMs in subscription: '$BenchmarkSubscription'" - foreach ($VM in $benchmarkVMs) { - Add-LogMessage -Level Info "... 
$($VM.Name)" - } - # Get the NSG rules and connectivity for each VM in the subscription - foreach ($benchmarkVM in $benchmarkVMs) { - Add-LogMessage -Level Info "Getting NSG rules and connectivity for $($VM.Name)" - $JsonConfig[$benchmarkVM.Name] = [ordered]@{ - InternetFromPort = [ordered]@{ - "80" = (Test-OutboundConnection -VM $benchmarkVM -DestinationAddress "google.com" -DestinationPort 80) - "443" = (Test-OutboundConnection -VM $benchmarkVM -DestinationAddress "google.com" -DestinationPort 443) - } - Rules = Get-NSGRules -VM $benchmarkVM - } - } - $OutputFile = New-TemporaryFile - Out-File -FilePath $OutputFile -Encoding "UTF8" -InputObject ($JsonConfig | ConvertTo-Json -Depth 99) - Add-LogMessage -Level Info "Configuration file generated at '$($OutputFile.FullName)'" - $BenchmarkJsonPath = $OutputFile.FullName -} elseif ($BenchmarkConfig) { - $BenchmarkJsonPath = $BenchmarkConfig -} - - -# Deserialise VMs from JSON config -# -------------------------------- -$BenchmarkJsonConfig = Get-Content -Path $BenchmarkJsonPath -Raw -Encoding UTF-8 | ConvertFrom-Json -$benchmarkVMs = @() -foreach ($JsonVm in $BenchmarkJsonConfig.PSObject.Properties) { - $VM = New-Object -TypeName PsObject - $VM | Add-Member -MemberType NoteProperty -Name Name -Value $JsonVm.Name - $VM | Add-Member -MemberType NoteProperty -Name InternetFromPort -Value @{} - $VM.InternetFromPort.80 = $JsonVm.PSObject.Properties.Value.InternetFromPort.80 - $VM.InternetFromPort.443 = $JsonVm.PSObject.Properties.Value.InternetFromPort.443 - $VM | Add-Member -MemberType NoteProperty -Name Rules -Value @() - foreach ($rule in $JsonVm.PSObject.Properties.Value.Rules) { - if ($rule.Name) { $VM.Rules += $(Convert-RuleToEffectiveRule $rule) } - } - $benchmarkVMs += $VM -} - - -# Get VMs in test SHM -# ------------------- -$null = Set-AzContext -SubscriptionId $Subscription -ErrorAction Stop -$testVMs = Get-AzVM -Add-LogMessage -Level Info "Found $($testVMs.Count) VMs in subscription: '$Subscription'" -foreach ($VM in $testVMs) { - Add-LogMessage -Level Info "... $($VM.Name)" -} - - -# Create a hash table which maps test VMs to benchmark ones -# --------------------------------------------------------- -$vmHashTable = @{} -foreach ($testVM in $testVMs) { - $nameToCheck = $testVM.Name - # Only match against names that have not been matched yet - $benchmarkVMNames = $benchmarkVMs | ForEach-Object { $_.Name } | Where-Object { ($vmHashTable.Values | ForEach-Object { $_.Name }) -NotContains $_ } - $benchmarkVM = $benchmarkVMs | Where-Object { $_.Name -eq $(Select-ClosestMatch -Array $benchmarkVMNames -Value $nameToCheck) } - $vmHashTable[$testVM] = $benchmarkVM - Add-LogMessage -Level Info "matched $($testVM.Name) => $($benchmarkVM.Name)" -} - - -# Iterate over paired VMs checking their network settings -# ------------------------------------------------------- -foreach ($testVM in $testVMs) { - $benchmarkVM = $vmHashTable[$testVM] - - # Get parameters for new VM - # ------------------------- - $null = Set-AzContext -SubscriptionId $Subscription -ErrorAction Stop - Add-LogMessage -Level Info "Getting NSG rules and connectivity for $($testVM.Name)" - $testRules = Get-NSGRules -VM $testVM - # Check that each NSG rule has a matching equivalent (which might be named differently) - Add-LogMessage -Level Info "Comparing NSG rules for $($benchmarkVM.Name) and $($testVM.Name)" - Add-LogMessage -Level Info "... 
ensuring that all $($benchmarkVM.Name) rules exist on $($testVM.Name)" - Compare-NSGRules -BenchmarkRules $benchmarkVM.Rules -TestRules $testRules - Add-LogMessage -Level Info "... ensuring that all $($testVM.Name) rules exist on $($benchmarkVM.Name)" - Compare-NSGRules -BenchmarkRules $testRules -TestRules $benchmarkVM.Rules - - # Check that internet connectivity is the same for matched VMs - Add-LogMessage -Level Info "Comparing internet connectivity for $($benchmarkVM.Name) and $($testVM.Name)..." - # Test internet access on ports 80 and 443 - foreach ($port in (80, 443)) { - $testInternet = Test-OutboundConnection -VM $testVM -DestinationAddress "google.com" -DestinationPort $port - if ($benchmarkVM.InternetFromPort[$port] -eq $testInternet) { - Add-LogMessage -Level Success "The internet is '$($benchmarkVM.InternetFromPort[$port])' on port $port from both" - } else { - Add-LogMessage -Level Failure "The internet is '$($benchmarkVM.InternetFromPort[$port])' on port $port from $($benchmarkVM.Name)" - Add-LogMessage -Level Failure "The internet is '$($testInternet)' on port $port from $($testVM.Name)" - } - } -} - - -# Switch back to original subscription -# ------------------------------------ -$null = Set-AzContext -Context $originalContext -ErrorAction Stop diff --git a/tests/Run_Pester_Tests.ps1 b/tests/Run_Pester_Tests.ps1 deleted file mode 100644 index 1e79a92dde..0000000000 --- a/tests/Run_Pester_Tests.ps1 +++ /dev/null @@ -1,37 +0,0 @@ -#Requires -Version 7.0.0 -#Requires -Modules @{ ModuleName="Az.RecoveryServices"; ModuleVersion="1.3.0" } -#Requires -Modules @{ ModuleName="Pester"; ModuleVersion="5.1.0" } -#Requires -Modules @{ ModuleName="PSScriptAnalyzer"; ModuleVersion="1.19.0" } - -# Parameter sets in Powershell are a bit counter-intuitive. 
See here (https://docs.microsoft.com/en-us/powershell/scripting/developer/cmdlet/cmdlet-parameter-sets?view=powershell-7) for details -param( - [Parameter(Mandatory = $false, HelpMessage = "Name of the test(s) to run")] - [string]$TestNameContains -) - -# Set up a Pester run block with one parameter -# -------------------------------------------- -$pesterBlock = { - param($RunPath, $TestNameContains) - Import-Module Pester -ErrorAction Stop - - # Configuration with one parameter - $configuration = [PesterConfiguration]::Default - $configuration.Output.Verbosity = "Detailed" - $configuration.Run.PassThru = $true - $configuration.Run.Path = $RunPath - if ($TestNameContains) { $configuration.Filter.FullName = "*${TestNameContains}*" } - - # Run Pester - $results = Invoke-Pester -Configuration $configuration - if ($results.Result -eq "Failed") { - throw "Tests Passed: $($results.PassedCount), Failed: $($results.FailedCount), Skipped: $($results.Skipped.Count) NotRun: $($results.NotRun.Count)" - } -} - - -# Run Pester tests in a fresh Powershell context -# ---------------------------------------------- -$job = Start-Job -ScriptBlock $pesterBlock -ArgumentList @((Join-Path $PSScriptRoot "pester"), $TestNameContains) -$job | Receive-Job -Wait -AutoRemoveJob -if ($job.State -eq "Failed") { exit 1 } diff --git a/tests/benchmark_shm.json b/tests/benchmark_shm.json deleted file mode 100644 index 0cb1bef70a..0000000000 --- a/tests/benchmark_shm.json +++ /dev/null @@ -1,2861 +0,0 @@ -{ - "DC1-SHM-turing1": { - "InternetFromPort": { - "80": "Reachable", - "443": "Reachable" - }, - "Rules": [{ - "Name": "RPC_endpoint_mapper", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "135-135" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 200, - "Direction": "Inbound" - }, - { - "Name": "LDAP", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "389-389" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 201, - "Direction": "Inbound" - }, - { - "Name": "LDAP_Ping", - "Protocol": "Udp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "389-389" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 202, - "Direction": "Inbound" - }, - { - "Name": "LDAP_over_SSL", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "636-636" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 203, - "Direction": "Inbound" - }, - { - "Name": "Global_catalog_LDAP", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "3268-3268" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 
204, - "Direction": "Inbound" - }, - { - "Name": "Global_catalog_LDAP_over_SSL", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "3269-3269" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 205, - "Direction": "Inbound" - }, - { - "Name": "DNS", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "53-53" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 206, - "Direction": "Inbound" - }, - { - "Name": "Kerberos", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "88-88" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 207, - "Direction": "Inbound" - }, - { - "Name": "SMB_over_IP_Microsoft-DS", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "445-445" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 208, - "Direction": "Inbound" - }, - { - "Name": "NetBIOS_service", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "137-137" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 209, - "Direction": "Inbound" - }, - { - "Name": "NetBIOS_datagram_service", - "Protocol": "Udp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "138-138" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 210, - "Direction": "Inbound" - }, - { - "Name": "NetBIOS_session_service", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "139-139" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 211, - "Direction": "Inbound" - }, - { - "Name": "RPC_for_LSA_SAM_Netlogon", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "49152-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 212, - "Direction": "Inbound" - }, - { - "Name": "Kerberos_Password_Change", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "464-464" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - 
"ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 213, - "Direction": "Inbound" - }, - { - "Name": "Active_Directory_Web_Services", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "9389-9389" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 214, - "Direction": "Inbound" - }, - { - "Name": "RADIUS_Authenitcation_RDS_to_NPS", - "Protocol": "Udp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "1645-1645", - "1646-1646", - "1812-1812", - "1813-1813" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 300, - "Direction": "Inbound" - }, - { - "Name": "Remote_Desktop_Connection", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "3389-3389" - ], - "SourceAddressPrefix": [ - "172.16.201.0/24" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 400, - "Direction": "Inbound" - }, - { - "Name": "Deny_All", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 3000, - "Direction": "Inbound" - }, - { - "Name": "AllowVnetInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Inbound" - }, - { - "Name": "AllowAzureLoadBalancerInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "AzureLoadBalancer" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Inbound" - }, - { - "Name": "DenyAllInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Inbound" - }, - { - "Name": "AllowVnetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Outbound" - }, - { - "Name": "AllowInternetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - 
"DestinationAddressPrefix": [ - "Internet" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Outbound" - }, - { - "Name": "DenyAllOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Outbound" - } - ] - }, - "DC2-SHM-turing1": { - "InternetFromPort": { - "80": "Reachable", - "443": "Reachable" - }, - "Rules": [{ - "Name": "RPC_endpoint_mapper", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "135-135" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 200, - "Direction": "Inbound" - }, - { - "Name": "LDAP", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "389-389" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 201, - "Direction": "Inbound" - }, - { - "Name": "LDAP_Ping", - "Protocol": "Udp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "389-389" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 202, - "Direction": "Inbound" - }, - { - "Name": "LDAP_over_SSL", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "636-636" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 203, - "Direction": "Inbound" - }, - { - "Name": "Global_catalog_LDAP", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "3268-3268" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 204, - "Direction": "Inbound" - }, - { - "Name": "Global_catalog_LDAP_over_SSL", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "3269-3269" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 205, - "Direction": "Inbound" - }, - { - "Name": "DNS", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "53-53" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 206, - "Direction": "Inbound" - }, - { - "Name": "Kerberos", - "Protocol": "All", - "SourcePortRange": [ - 
"0-65535" - ], - "DestinationPortRange": [ - "88-88" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 207, - "Direction": "Inbound" - }, - { - "Name": "SMB_over_IP_Microsoft-DS", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "445-445" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 208, - "Direction": "Inbound" - }, - { - "Name": "NetBIOS_service", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "137-137" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 209, - "Direction": "Inbound" - }, - { - "Name": "NetBIOS_datagram_service", - "Protocol": "Udp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "138-138" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 210, - "Direction": "Inbound" - }, - { - "Name": "NetBIOS_session_service", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "139-139" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 211, - "Direction": "Inbound" - }, - { - "Name": "RPC_for_LSA_SAM_Netlogon", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "49152-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 212, - "Direction": "Inbound" - }, - { - "Name": "Kerberos_Password_Change", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "464-464" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 213, - "Direction": "Inbound" - }, - { - "Name": "Active_Directory_Web_Services", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "9389-9389" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 214, - "Direction": "Inbound" - }, - { - "Name": "RADIUS_Authenitcation_RDS_to_NPS", - "Protocol": "Udp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "1645-1645", - "1646-1646", - "1812-1812", - "1813-1813" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - 
"ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 300, - "Direction": "Inbound" - }, - { - "Name": "Remote_Desktop_Connection", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "3389-3389" - ], - "SourceAddressPrefix": [ - "172.16.201.0/24" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 400, - "Direction": "Inbound" - }, - { - "Name": "Deny_All", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 3000, - "Direction": "Inbound" - }, - { - "Name": "AllowVnetInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Inbound" - }, - { - "Name": "AllowAzureLoadBalancerInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "AzureLoadBalancer" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Inbound" - }, - { - "Name": "DenyAllInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Inbound" - }, - { - "Name": "AllowVnetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Outbound" - }, - { - "Name": "AllowInternetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "Internet" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Outbound" - }, - { - "Name": "DenyAllOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Outbound" - } - ] - }, - "NPS-SHM-turing1": { - "InternetFromPort": { - "80": "Reachable", - "443": "Reachable" - }, - "Rules": [{ - "Name": "RPC_endpoint_mapper", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "135-135" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - 
"DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 200, - "Direction": "Inbound" - }, - { - "Name": "LDAP", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "389-389" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 201, - "Direction": "Inbound" - }, - { - "Name": "LDAP_Ping", - "Protocol": "Udp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "389-389" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 202, - "Direction": "Inbound" - }, - { - "Name": "LDAP_over_SSL", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "636-636" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 203, - "Direction": "Inbound" - }, - { - "Name": "Global_catalog_LDAP", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "3268-3268" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 204, - "Direction": "Inbound" - }, - { - "Name": "Global_catalog_LDAP_over_SSL", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "3269-3269" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 205, - "Direction": "Inbound" - }, - { - "Name": "DNS", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "53-53" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 206, - "Direction": "Inbound" - }, - { - "Name": "Kerberos", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "88-88" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 207, - "Direction": "Inbound" - }, - { - "Name": "SMB_over_IP_Microsoft-DS", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "445-445" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 208, - "Direction": "Inbound" - }, - { - "Name": "NetBIOS_service", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "137-137" - ], - "SourceAddressPrefix": [ - 
"VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 209, - "Direction": "Inbound" - }, - { - "Name": "NetBIOS_datagram_service", - "Protocol": "Udp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "138-138" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 210, - "Direction": "Inbound" - }, - { - "Name": "NetBIOS_session_service", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "139-139" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 211, - "Direction": "Inbound" - }, - { - "Name": "RPC_for_LSA_SAM_Netlogon", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "49152-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 212, - "Direction": "Inbound" - }, - { - "Name": "Kerberos_Password_Change", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "464-464" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 213, - "Direction": "Inbound" - }, - { - "Name": "Active_Directory_Web_Services", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "9389-9389" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 214, - "Direction": "Inbound" - }, - { - "Name": "RADIUS_Authenitcation_RDS_to_NPS", - "Protocol": "Udp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "1645-1645", - "1646-1646", - "1812-1812", - "1813-1813" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 300, - "Direction": "Inbound" - }, - { - "Name": "Remote_Desktop_Connection", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "3389-3389" - ], - "SourceAddressPrefix": [ - "172.16.201.0/24" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 400, - "Direction": "Inbound" - }, - { - "Name": "Deny_All", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 3000, - "Direction": "Inbound" - }, - { - "Name": 
"AllowVnetInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Inbound" - }, - { - "Name": "AllowAzureLoadBalancerInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "AzureLoadBalancer" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Inbound" - }, - { - "Name": "DenyAllInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Inbound" - }, - { - "Name": "AllowVnetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Outbound" - }, - { - "Name": "AllowInternetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "Internet" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Outbound" - }, - { - "Name": "DenyAllOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Outbound" - } - ] - }, - "CRAN-MIRROR-EXTERNAL-TIER-2": { - "InternetFromPort": { - "80": "Unreachable", - "443": "Reachable" - }, - "Rules": [{ - "Name": "IgnoreInboundRulesBelowHere", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 3000, - "Direction": "Inbound" - }, - { - "Name": "updateOutbound", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "443-443", - "873-873" - ], - "SourceAddressPrefix": [ - "10.20.2.0/28" - ], - "DestinationAddressPrefix": [ - "Internet" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 300, - "Direction": "Outbound" - }, - { - "Name": "IgnoreOutboundRulesBelowHere", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - 
"ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 3000, - "Direction": "Outbound" - }, - { - "Name": "rsyncOutbound", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "22-22", - "873-873" - ], - "SourceAddressPrefix": [ - "10.20.2.0/28" - ], - "DestinationAddressPrefix": [ - "10.20.2.16/28" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 400, - "Direction": "Outbound" - }, - { - "Name": "AllowVnetInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Inbound" - }, - { - "Name": "AllowAzureLoadBalancerInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "AzureLoadBalancer" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Inbound" - }, - { - "Name": "DenyAllInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Inbound" - }, - { - "Name": "AllowVnetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Outbound" - }, - { - "Name": "AllowInternetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "Internet" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Outbound" - }, - { - "Name": "DenyAllOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Outbound" - } - ] - }, - "CRAN-MIRROR-INTERNAL-TIER-2": { - "InternetFromPort": { - "80": "Unreachable", - "443": "Unreachable" - }, - "Rules": [{ - "Name": "rsyncInbound", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "22-22", - "873-873" - ], - "SourceAddressPrefix": [ - "10.20.2.0/28" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 200, - "Direction": "Inbound" - }, - { - "Name": "mirrorRequestsInbound", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "80-80", - "443-443", - "3128-3128" - 
], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 300, - "Direction": "Inbound" - }, - { - "Name": "IgnoreInboundRulesBelowHere", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 3000, - "Direction": "Inbound" - }, - { - "Name": "IgnoreOutboundRulesBelowHere", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 3000, - "Direction": "Outbound" - }, - { - "Name": "AllowVnetInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Inbound" - }, - { - "Name": "AllowAzureLoadBalancerInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "AzureLoadBalancer" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Inbound" - }, - { - "Name": "DenyAllInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Inbound" - }, - { - "Name": "AllowVnetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Outbound" - }, - { - "Name": "AllowInternetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "Internet" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Outbound" - }, - { - "Name": "DenyAllOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Outbound" - } - ] - }, - "PYPI-MIRROR-EXTERNAL-TIER-2": { - "InternetFromPort": { - "80": "Unreachable", - "443": "Reachable" - }, - "Rules": [{ - "Name": "IgnoreInboundRulesBelowHere", - "Protocol": 
"All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 3000, - "Direction": "Inbound" - }, - { - "Name": "updateOutbound", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "443-443", - "873-873" - ], - "SourceAddressPrefix": [ - "10.20.2.0/28" - ], - "DestinationAddressPrefix": [ - "Internet" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 300, - "Direction": "Outbound" - }, - { - "Name": "IgnoreOutboundRulesBelowHere", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 3000, - "Direction": "Outbound" - }, - { - "Name": "rsyncOutbound", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "22-22", - "873-873" - ], - "SourceAddressPrefix": [ - "10.20.2.0/28" - ], - "DestinationAddressPrefix": [ - "10.20.2.16/28" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 400, - "Direction": "Outbound" - }, - { - "Name": "AllowVnetInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Inbound" - }, - { - "Name": "AllowAzureLoadBalancerInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "AzureLoadBalancer" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Inbound" - }, - { - "Name": "DenyAllInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Inbound" - }, - { - "Name": "AllowVnetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Outbound" - }, - { - "Name": "AllowInternetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "Internet" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Outbound" - }, - { - "Name": "DenyAllOutBound", - "Protocol": "All", - 
"SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Outbound" - } - ] - }, - "PYPI-MIRROR-EXTERNAL-TIER-3": { - "InternetFromPort": { - "80": "Unreachable", - "443": "Reachable" - }, - "Rules": [{ - "Name": "IgnoreInboundRulesBelowHere", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 3000, - "Direction": "Inbound" - }, - { - "Name": "updateOutbound", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "443-443", - "873-873" - ], - "SourceAddressPrefix": [ - "10.20.3.0/28" - ], - "DestinationAddressPrefix": [ - "Internet" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 300, - "Direction": "Outbound" - }, - { - "Name": "IgnoreOutboundRulesBelowHere", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 3000, - "Direction": "Outbound" - }, - { - "Name": "rsyncOutbound", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "22-22", - "873-873" - ], - "SourceAddressPrefix": [ - "10.20.3.0/28" - ], - "DestinationAddressPrefix": [ - "10.20.3.16/28" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 400, - "Direction": "Outbound" - }, - { - "Name": "AllowVnetInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Inbound" - }, - { - "Name": "AllowAzureLoadBalancerInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "AzureLoadBalancer" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Inbound" - }, - { - "Name": "DenyAllInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Inbound" - }, - { - "Name": "AllowVnetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": 
"Allow", - "Priority": 65000, - "Direction": "Outbound" - }, - { - "Name": "AllowInternetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "Internet" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Outbound" - }, - { - "Name": "DenyAllOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Outbound" - } - ] - }, - "PYPI-MIRROR-INTERNAL-TIER-2": { - "InternetFromPort": { - "80": "Unreachable", - "443": "Unreachable" - }, - "Rules": [{ - "Name": "rsyncInbound", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "22-22", - "873-873" - ], - "SourceAddressPrefix": [ - "10.20.2.0/28" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 200, - "Direction": "Inbound" - }, - { - "Name": "mirrorRequestsInbound", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "80-80", - "443-443", - "3128-3128" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 300, - "Direction": "Inbound" - }, - { - "Name": "IgnoreInboundRulesBelowHere", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 3000, - "Direction": "Inbound" - }, - { - "Name": "IgnoreOutboundRulesBelowHere", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 3000, - "Direction": "Outbound" - }, - { - "Name": "AllowVnetInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Inbound" - }, - { - "Name": "AllowAzureLoadBalancerInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "AzureLoadBalancer" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Inbound" - }, - { - "Name": "DenyAllInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - 
"*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Inbound" - }, - { - "Name": "AllowVnetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Outbound" - }, - { - "Name": "AllowInternetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "Internet" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Outbound" - }, - { - "Name": "DenyAllOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Outbound" - } - ] - }, - "PYPI-MIRROR-INTERNAL-TIER-3": { - "InternetFromPort": { - "80": "Unreachable", - "443": "Unreachable" - }, - "Rules": [{ - "Name": "rsyncInbound", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "22-22", - "873-873" - ], - "SourceAddressPrefix": [ - "10.20.3.0/28" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 200, - "Direction": "Inbound" - }, - { - "Name": "mirrorRequestsInbound", - "Protocol": "Tcp", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "80-80", - "443-443", - "3128-3128" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 300, - "Direction": "Inbound" - }, - { - "Name": "IgnoreInboundRulesBelowHere", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 3000, - "Direction": "Inbound" - }, - { - "Name": "IgnoreOutboundRulesBelowHere", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 3000, - "Direction": "Outbound" - }, - { - "Name": "AllowVnetInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Inbound" - }, - { - "Name": "AllowAzureLoadBalancerInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - 
"DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "AzureLoadBalancer" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Inbound" - }, - { - "Name": "DenyAllInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Inbound" - }, - { - "Name": "AllowVnetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Outbound" - }, - { - "Name": "AllowInternetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "Internet" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Outbound" - }, - { - "Name": "DenyAllOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Outbound" - } - ] - } -} \ No newline at end of file diff --git a/tests/benchmark_sre.json b/tests/benchmark_sre.json deleted file mode 100644 index 3e9722d505..0000000000 --- a/tests/benchmark_sre.json +++ /dev/null @@ -1,1104 +0,0 @@ -{ - "SRE-SREID-160-SRD-VERSION": { - "InternetFromPort": { - "80": "Unreachable", - "443": "Unreachable" - }, - "Rules": [{ - "Name": "Internet_Out", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "Internet" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 4000, - "Direction": "Outbound" - }, - { - "Name": "AllowVnetInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Inbound" - }, - { - "Name": "AllowAzureLoadBalancerInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "AzureLoadBalancer" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Inbound" - }, - { - "Name": "DenyAllInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - 
"DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Inbound" - }, - { - "Name": "AllowVnetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Outbound" - }, - { - "Name": "AllowInternetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "Internet" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Outbound" - }, - { - "Name": "DenyAllOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Outbound" - } - ] - }, - "DAT-SRE-SREID": { - "InternetFromPort": { - "80": "Unreachable", - "443": "Unreachable" - }, - "Rules": [{ - "Name": "Deny_Internet", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "Internet" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 4000, - "Direction": "Outbound" - }, - { - "Name": "AllowVnetInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Inbound" - }, - { - "Name": "AllowAzureLoadBalancerInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "AzureLoadBalancer" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Inbound" - }, - { - "Name": "DenyAllInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Inbound" - }, - { - "Name": "AllowVnetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Outbound" - }, - { - "Name": "AllowInternetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - 
"DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "Internet" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Outbound" - }, - { - "Name": "DenyAllOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Outbound" - } - ] - }, - "GITLAB-SRE-SREID": { - "InternetFromPort": { - "80": "Unreachable", - "443": "Unreachable" - }, - "Rules": [{ - "Name": "Internet_Out", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "Internet" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 4000, - "Direction": "Outbound" - }, - { - "Name": "AllowVnetInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Inbound" - }, - { - "Name": "AllowAzureLoadBalancerInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "AzureLoadBalancer" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Inbound" - }, - { - "Name": "DenyAllInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Inbound" - }, - { - "Name": "AllowVnetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Outbound" - }, - { - "Name": "AllowInternetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "Internet" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Outbound" - }, - { - "Name": "DenyAllOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Outbound" - } - ] - }, - "CODIMD-SRE-SREID": { - 
"InternetFromPort": { - "80": "Unreachable", - "443": "Unreachable" - }, - "Rules": [{ - "Name": "Internet_Out", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "Internet" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 4000, - "Direction": "Outbound" - }, - { - "Name": "AllowVnetInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Inbound" - }, - { - "Name": "AllowAzureLoadBalancerInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "AzureLoadBalancer" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Inbound" - }, - { - "Name": "DenyAllInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Inbound" - }, - { - "Name": "AllowVnetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Outbound" - }, - { - "Name": "AllowInternetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "Internet" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Outbound" - }, - { - "Name": "DenyAllOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Outbound" - } - ] - }, - "RDG-SRE-SREID": { - "InternetFromPort": { - "80": "Reachable", - "443": "Reachable" - }, - "Rules": [{ - "Name": "HTTPS_In", - "Protocol": "TCP", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "443-443" - ], - "SourceAddressPrefix": [ - "193.60.220.253/32" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 100, - "Direction": "Inbound" - }, - { - "Name": "AllowVnetInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - 
"VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Inbound" - }, - { - "Name": "AllowAzureLoadBalancerInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "AzureLoadBalancer" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Inbound" - }, - { - "Name": "DenyAllInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Inbound" - }, - { - "Name": "AllowVnetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Outbound" - }, - { - "Name": "AllowInternetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "Internet" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Outbound" - }, - { - "Name": "DenyAllOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Outbound" - }, - { - "Name": "RADIUS_Authentication_RDS_to_NPS", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "1645-1645", - "1646-1646", - "1812-1812", - "1813-1813" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "10.0.0.248/32" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 300, - "Direction": "Outbound" - } - ] - }, - "APP-SRE-SREID": { - "InternetFromPort": { - "80": "Unreachable", - "443": "Unreachable" - }, - "Rules": [{ - "Name": "Deny_Internet", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "Internet" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 4000, - "Direction": "Outbound" - }, - { - "Name": "AllowVnetInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Inbound" - }, - { - "Name": "AllowAzureLoadBalancerInBound", - "Protocol": "All", - 
"SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "AzureLoadBalancer" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Inbound" - }, - { - "Name": "DenyAllInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Inbound" - }, - { - "Name": "AllowVnetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Outbound" - }, - { - "Name": "AllowInternetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "Internet" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Outbound" - }, - { - "Name": "DenyAllOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Outbound" - } - ] - }, - "DKP-SRE-SREID": { - "InternetFromPort": { - "80": "Unreachable", - "443": "Unreachable" - }, - "Rules": [{ - "Name": "Deny_Internet", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "Internet" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 4000, - "Direction": "Outbound" - }, - { - "Name": "AllowVnetInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Inbound" - }, - { - "Name": "AllowAzureLoadBalancerInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "AzureLoadBalancer" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Inbound" - }, - { - "Name": "DenyAllInBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": 
"Inbound" - }, - { - "Name": "AllowVnetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "VirtualNetwork" - ], - "DestinationAddressPrefix": [ - "VirtualNetwork" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65000, - "Direction": "Outbound" - }, - { - "Name": "AllowInternetOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "Internet" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Allow", - "Priority": 65001, - "Direction": "Outbound" - }, - { - "Name": "DenyAllOutBound", - "Protocol": "All", - "SourcePortRange": [ - "0-65535" - ], - "DestinationPortRange": [ - "0-65535" - ], - "SourceAddressPrefix": [ - "*" - ], - "DestinationAddressPrefix": [ - "*" - ], - "ExpandedSourceAddressPrefix": null, - "ExpandedDestinationAddressPrefix": null, - "Access": "Deny", - "Priority": 65500, - "Direction": "Outbound" - } - ] - } -} \ No newline at end of file diff --git a/tests/commands/conftest.py b/tests/commands/conftest.py new file mode 100644 index 0000000000..6459c84d6c --- /dev/null +++ b/tests/commands/conftest.py @@ -0,0 +1,238 @@ +from pytest import fixture +from typer.testing import CliRunner + +from data_safe_haven.config import ( + Context, + ContextManager, + DSHPulumiConfig, + SHMConfig, + SREConfig, +) +from data_safe_haven.exceptions import ( + DataSafeHavenAzureAPIAuthenticationError, + DataSafeHavenAzureError, +) +from data_safe_haven.external import AzureSdk, GraphApi +from data_safe_haven.infrastructure import ImperativeSHM, SREProjectManager + + +@fixture +def context(context_yaml) -> Context: + return ContextManager.from_yaml(context_yaml).context + + +@fixture +def mock_azure_sdk_blob_exists_false(mocker): + mocker.patch.object(AzureSdk, "blob_exists", return_value=False) + + +@fixture +def mock_graph_api_add_custom_domain(mocker): + mocker.patch.object( + GraphApi, "add_custom_domain", return_value="dummy-verification-record" + ) + + +@fixture +def mock_graph_api_token(mocker): + mocker.patch.object(GraphApi, "token", return_value="dummy-token") + + +@fixture +def mock_imperative_shm_deploy(mocker): + mocker.patch.object( + ImperativeSHM, + "deploy", + side_effect=print("mock deploy"), # noqa: T201 + ) + + +@fixture +def mock_imperative_shm_deploy_then_exit(mocker): + def create_then_exit(): + print("mock deploy") # noqa: T201 + msg = "mock deploy error" + raise DataSafeHavenAzureAPIAuthenticationError(msg) + + mocker.patch.object( + ImperativeSHM, + "deploy", + side_effect=create_then_exit, + ) + + +@fixture +def mock_imperative_shm_teardown_then_exit(mocker): + def teardown_then_exit(): + print("mock teardown") # noqa: T201 + msg = "mock teardown error" + raise DataSafeHavenAzureAPIAuthenticationError(msg) + + mocker.patch.object( + ImperativeSHM, + "teardown", + side_effect=teardown_then_exit, + ) + + +@fixture +def mock_ip_1_2_3_4(requests_mock): + requests_mock.get("https://api.ipify.org", text="1.2.3.4") + + +@fixture +def mock_pulumi_config_from_remote(mocker, pulumi_config): + mocker.patch.object(DSHPulumiConfig, "from_remote", return_value=pulumi_config) + + +@fixture +def mock_pulumi_config_from_remote_or_create(mocker, pulumi_config_empty): + mocker.patch.object( + DSHPulumiConfig, "from_remote_or_create", 
return_value=pulumi_config_empty + ) + + +@fixture +def mock_pulumi_config_no_key_from_remote(mocker, pulumi_config_no_key): + mocker.patch.object( + DSHPulumiConfig, "from_remote", return_value=pulumi_config_no_key + ) + + +@fixture +def mock_pulumi_config_upload(mocker): + mocker.patch.object(DSHPulumiConfig, "upload", return_value=None) + + +@fixture +def mock_shm_config_from_remote(mocker, shm_config): + mocker.patch.object(SHMConfig, "from_remote", return_value=shm_config) + + +@fixture +def mock_shm_config_from_remote_fails(mocker): + mocker.patch.object( + SHMConfig, + "from_remote", + side_effect=DataSafeHavenAzureError("mock from_remote failure"), + ) + + +@fixture +def mock_shm_config_remote_exists(mocker): + mocker.patch.object(SHMConfig, "remote_exists", return_value=True) + + +@fixture +def mock_shm_config_remote_yaml_diff(mocker): + mocker.patch.object(SHMConfig, "remote_yaml_diff", return_value=[]) + + +@fixture +def mock_shm_config_upload(mocker): + mocker.patch.object(SHMConfig, "upload", return_value=None) + + +@fixture +def mock_sre_config_from_remote(mocker, sre_config): + mocker.patch.object(SREConfig, "from_remote_by_name", return_value=sre_config) + + +@fixture +def mock_sre_config_alternate_from_remote(mocker, sre_config_alternate): + mocker.patch.object( + SREConfig, "from_remote_by_name", return_value=sre_config_alternate + ) + + +@fixture +def mock_sre_project_manager_deploy_then_exit(mocker): + def create_then_exit(*args, **kwargs): # noqa: ARG001 + print("mock deploy") # noqa: T201 + msg = "mock deploy error" + raise DataSafeHavenAzureAPIAuthenticationError(msg) + + mocker.patch.object( + SREProjectManager, + "deploy", + side_effect=create_then_exit, + ) + + +@fixture +def mock_sre_project_manager_teardown_then_exit(mocker): + def teardown_then_exit(*args, **kwargs): # noqa: ARG001 + print("mock teardown") # noqa: T201 + msg = "mock teardown error" + raise DataSafeHavenAzureAPIAuthenticationError(msg) + + mocker.patch.object( + SREProjectManager, + "teardown", + side_effect=teardown_then_exit, + ) + + +@fixture +def runner(tmp_contexts): + runner = CliRunner( + env={ + "DSH_CONFIG_DIRECTORY": str(tmp_contexts), + "COLUMNS": "500", # Set large number of columns to avoid rich wrapping text + "TERM": "dumb", # Disable colours, style and interactive rich features + }, + mix_stderr=False, + ) + return runner + + +@fixture +def runner_none(tmp_contexts_none): + runner = CliRunner( + env={ + "DSH_CONFIG_DIRECTORY": str(tmp_contexts_none), + "COLUMNS": "500", # Set large number of columns to avoid rich wrapping text + "TERM": "dumb", # Disable colours, style and interactive rich features + }, + mix_stderr=False, + ) + return runner + + +@fixture +def runner_no_context_file(tmp_path): + runner = CliRunner( + env={ + "DSH_CONFIG_DIRECTORY": str(tmp_path), + "COLUMNS": "500", # Set large number of columns to avoid rich wrapping text + "TERM": "dumb", # Disable colours, style and interactive rich features + }, + mix_stderr=False, + ) + return runner + + +@fixture +def tmp_contexts(tmp_path, context_yaml): + config_file_path = tmp_path / "contexts.yaml" + with open(config_file_path, "w") as f: + f.write(context_yaml) + return tmp_path + + +@fixture +def tmp_contexts_gems(tmp_path, context_yaml): + context_yaml = context_yaml.replace("selected: acmedeployment", "selected: gems") + config_file_path = tmp_path / "contexts.yaml" + with open(config_file_path, "w") as f: + f.write(context_yaml) + return tmp_path + + +@fixture +def tmp_contexts_none(tmp_path, context_yaml): + 
    context_yaml = context_yaml.replace("selected: acmedeployment", "selected: null")
+    config_file_path = tmp_path / "contexts.yaml"
+    with open(config_file_path, "w") as f:
+        f.write(context_yaml)
+    return tmp_path
diff --git a/tests/commands/test_cli.py b/tests/commands/test_cli.py
new file mode 100644
index 0000000000..889382776f
--- /dev/null
+++ b/tests/commands/test_cli.py
@@ -0,0 +1,35 @@
+from data_safe_haven.commands import application
+from data_safe_haven.version import __version__
+
+
+class TestHelp:
+    def result_checker(self, result):
+        assert result.exit_code == 0
+        assert "Usage: dsh [OPTIONS] COMMAND [ARGS]..." in result.stdout
+        assert "Arguments to the main executable" in result.stdout
+        assert "│ --verbose" in result.stdout
+        assert "│ --show-level" in result.stdout
+        assert "│ --version" in result.stdout
+        assert "│ --install-completion" in result.stdout
+        assert "│ --show-completion" in result.stdout
+        assert "│ --help" in result.stdout
+        assert "│ users" in result.stdout
+        assert "│ config" in result.stdout
+        assert "│ context" in result.stdout
+        assert "│ shm" in result.stdout
+        assert "│ sre" in result.stdout
+
+    def test_help(self, runner):
+        result = runner.invoke(application, ["--help"])
+        self.result_checker(result)
+
+    def test_help_short_code(self, runner):
+        result = runner.invoke(application, ["-h"])
+        self.result_checker(result)
+
+
+class TestVersion:
+    def test_version(self, runner):
+        result = runner.invoke(application, ["--version"])
+        assert result.exit_code == 0
+        assert f"Data Safe Haven {__version__}" in result.stdout
diff --git a/tests/commands/test_config_shm.py b/tests/commands/test_config_shm.py
new file mode 100644
index 0000000000..7298e41ead
--- /dev/null
+++ b/tests/commands/test_config_shm.py
@@ -0,0 +1,61 @@
+from data_safe_haven.commands.config import config_command_group
+from data_safe_haven.config import ContextManager, SHMConfig
+from data_safe_haven.exceptions import DataSafeHavenAzureError, DataSafeHavenConfigError
+from data_safe_haven.external import AzureSdk
+
+
+class TestShowSHM:
+    def test_show(self, mocker, runner, context, shm_config_yaml):
+        mock_method = mocker.patch.object(
+            AzureSdk, "download_blob", return_value=shm_config_yaml
+        )
+        result = runner.invoke(config_command_group, ["show-shm"])
+
+        assert result.exit_code == 0
+        assert shm_config_yaml in result.stdout
+
+        mock_method.assert_called_once_with(
+            SHMConfig.default_filename,
+            context.resource_group_name,
+            context.storage_account_name,
+            context.storage_container_name,
+        )
+
+    def test_show_file(self, mocker, runner, shm_config_yaml, tmp_path):
+        mocker.patch.object(AzureSdk, "download_blob", return_value=shm_config_yaml)
+        template_file = (tmp_path / "template_show.yaml").absolute()
+        result = runner.invoke(
+            config_command_group, ["show-shm", "--file", str(template_file)]
+        )
+
+        assert result.exit_code == 0
+        with open(template_file) as f:
+            template_text = f.read()
+        assert shm_config_yaml in template_text
+
+    def test_no_remote(self, mocker, runner):
+
+        mocker.patch.object(
+            SHMConfig, "from_remote", side_effect=DataSafeHavenAzureError(" ")
+        )
+        result = runner.invoke(config_command_group, ["show-shm"])
+        assert "SHM must be deployed" in result.stdout
+        assert result.exit_code == 1
+
+    def test_no_context(self, mocker, runner):
+
+        mocker.patch.object(
+            ContextManager, "from_file", side_effect=DataSafeHavenConfigError(" ")
+        )
+        result = runner.invoke(config_command_group, ["show-shm"])
+        assert "No context is selected" in result.stdout
+        assert result.exit_code == 1
+
+    def test_no_selected_context(self, mocker, runner):
+
+        mocker.patch.object(
+            ContextManager, "assert_context", side_effect=DataSafeHavenConfigError(" ")
+        )
+        result = runner.invoke(config_command_group, ["show-shm"])
+        assert "No context is selected" in result.stdout
+        assert result.exit_code == 1
diff --git a/tests/commands/test_config_sre.py b/tests/commands/test_config_sre.py
new file mode 100644
index 0000000000..7ae303b12f
--- /dev/null
+++ b/tests/commands/test_config_sre.py
@@ -0,0 +1,227 @@
+from pathlib import Path
+
+from data_safe_haven.commands.config import config_command_group
+from data_safe_haven.config import ContextManager, SREConfig
+from data_safe_haven.config.sre_config import sre_config_name
+from data_safe_haven.exceptions import (
+    DataSafeHavenAzureStorageError,
+    DataSafeHavenConfigError,
+    DataSafeHavenError,
+)
+from data_safe_haven.external import AzureSdk
+
+
+class TestShowSRE:
+    def test_show(self, mocker, runner, context, sre_config_yaml):
+        sre_name = "sandbox"
+        mock_method = mocker.patch.object(
+            AzureSdk, "download_blob", return_value=sre_config_yaml
+        )
+        result = runner.invoke(config_command_group, ["show", sre_name])
+
+        assert result.exit_code == 0
+        assert sre_config_yaml in result.stdout
+
+        mock_method.assert_called_once_with(
+            sre_config_name(sre_name),
+            context.resource_group_name,
+            context.storage_account_name,
+            context.storage_container_name,
+        )
+
+    def test_show_file(self, mocker, runner, sre_config_yaml, tmp_path):
+        mocker.patch.object(AzureSdk, "download_blob", return_value=sre_config_yaml)
+        template_file = (tmp_path / "template_show.yaml").absolute()
+        result = runner.invoke(
+            config_command_group, ["show", "sre-name", "--file", str(template_file)]
+        )
+
+        assert result.exit_code == 0
+        with open(template_file) as f:
+            template_text = f.read()
+        assert sre_config_yaml in template_text
+
+    def test_no_context(self, mocker, runner):
+        sre_name = "sandbox"
+        mocker.patch.object(
+            ContextManager, "from_file", side_effect=DataSafeHavenConfigError(" ")
+        )
+        result = runner.invoke(config_command_group, ["show", sre_name])
+        assert "No context is selected" in result.stdout
+        assert result.exit_code == 1
+
+    def test_no_selected_context(self, mocker, runner):
+        sre_name = "sandbox"
+        mocker.patch.object(
+            ContextManager, "assert_context", side_effect=DataSafeHavenConfigError(" ")
+        )
+        result = runner.invoke(config_command_group, ["show", sre_name])
+        assert "No context is selected" in result.stdout
+        assert result.exit_code == 1
+
+    def test_no_storage_account(self, mocker, runner):
+        sre_name = "sandbox"
+        mocker.patch.object(
+            SREConfig,
+            "from_remote_by_name",
+            side_effect=DataSafeHavenAzureStorageError(" "),
+        )
+        result = runner.invoke(config_command_group, ["show", sre_name])
+        assert "Ensure SHM is deployed" in result.stdout
+        assert result.exit_code == 1
+
+    def test_incorrect_sre_name(self, mocker, runner):
+        sre_name = "sandbox"
+        mocker.patch.object(
+            SREConfig, "from_remote_by_name", side_effect=DataSafeHavenError(" ")
+        )
+        result = runner.invoke(config_command_group, ["show", sre_name])
+        assert "No configuration exists for an SRE" in result.stdout
+        assert result.exit_code == 1
+
+
+class TestTemplateSRE:
+    def test_template(self, runner):
+        result = runner.invoke(config_command_group, ["template"])
+        assert result.exit_code == 0
+        assert (
+            "subscription_id: ID of the Azure subscription that the SRE will be deployed to"
+            in result.stdout
+        )
+        assert "sre:" in result.stdout
+
+    def test_template_file(self, runner, tmp_path):
+        template_file = (tmp_path / "template_create.yaml").absolute()
+        result = runner.invoke(
+            config_command_group, ["template", "--file", str(template_file)]
+        )
+        assert result.exit_code == 0
+        with open(template_file) as f:
+            template_text = f.read()
+        assert (
+            "subscription_id: ID of the Azure subscription that the SRE will be deployed to"
+            in template_text
+        )
+        assert "sre:" in template_text
+
+
+class TestUploadSRE:
+    def test_upload_new(
+        self, mocker, context, runner, sre_config_yaml, sre_config_file
+    ):
+        sre_name = "SandBox"
+        sre_filename = sre_config_name(sre_name)
+        mock_exists = mocker.patch.object(
+            SREConfig, "remote_exists", return_value=False
+        )
+        mock_upload = mocker.patch.object(AzureSdk, "upload_blob", return_value=None)
+        result = runner.invoke(
+            config_command_group,
+            ["upload", str(sre_config_file)],
+        )
+        assert result.exit_code == 0
+
+        mock_exists.assert_called_once_with(context, filename=sre_filename)
+        mock_upload.assert_called_once_with(
+            sre_config_yaml,
+            sre_filename,
+            context.resource_group_name,
+            context.storage_account_name,
+            context.storage_container_name,
+        )
+
+    def test_upload_no_changes(
+        self, mocker, context, runner, sre_config, sre_config_file
+    ):
+        sre_name = "SandBox"
+        sre_filename = sre_config_name(sre_name)
+        mock_exists = mocker.patch.object(SREConfig, "remote_exists", return_value=True)
+        mock_from_remote = mocker.patch.object(
+            SREConfig, "from_remote", return_value=sre_config
+        )
+        mock_upload = mocker.patch.object(AzureSdk, "upload_blob", return_value=None)
+        result = runner.invoke(
+            config_command_group,
+            ["upload", str(sre_config_file)],
+        )
+        assert result.exit_code == 0
+
+        mock_exists.assert_called_once_with(context, filename=sre_filename)
+        mock_from_remote.assert_called_once_with(context, filename=sre_filename)
+        mock_upload.assert_not_called()
+
+        assert "No changes, won't upload configuration."
in result.stdout + + def test_upload_changes( + self, + mocker, + context, + runner, + sre_config_alternate, + sre_config_file, + sre_config_yaml, + ): + sre_name = "sandbox" + sre_filename = sre_config_name(sre_name) + mock_exists = mocker.patch.object(SREConfig, "remote_exists", return_value=True) + mock_from_remote = mocker.patch.object( + SREConfig, "from_remote", return_value=sre_config_alternate + ) + mock_upload = mocker.patch.object(AzureSdk, "upload_blob", return_value=None) + result = runner.invoke( + config_command_group, + ["upload", str(sre_config_file)], + input="y\n", + ) + assert result.exit_code == 0 + + mock_exists.assert_called_once_with(context, filename=sre_filename) + mock_from_remote.assert_called_once_with(context, filename=sre_filename) + mock_upload.assert_called_once_with( + sre_config_yaml, + sre_filename, + context.resource_group_name, + context.storage_account_name, + context.storage_container_name, + ) + + assert "--- remote" in result.stdout + assert "+++ local" in result.stdout + + def test_upload_changes_n( + self, mocker, context, runner, sre_config_alternate, sre_config_file + ): + sre_name = "SandBox" + sre_filename = sre_config_name(sre_name) + mock_exists = mocker.patch.object(SREConfig, "remote_exists", return_value=True) + mock_from_remote = mocker.patch.object( + SREConfig, "from_remote", return_value=sre_config_alternate + ) + mock_upload = mocker.patch.object(AzureSdk, "upload_blob", return_value=None) + result = runner.invoke( + config_command_group, + ["upload", str(sre_config_file)], + input="n\n", + ) + assert result.exit_code == 0 + + mock_exists.assert_called_once_with(context, filename=sre_filename) + mock_from_remote.assert_called_once_with(context, filename=sre_filename) + mock_upload.assert_not_called() + + assert "--- remote" in result.stdout + assert "+++ local" in result.stdout + + def test_upload_no_file(self, mocker, runner): + mocker.patch.object(AzureSdk, "upload_blob", return_value=None) + result = runner.invoke( + config_command_group, + ["upload"], + ) + assert result.exit_code == 2 + + def test_upload_file_does_not_exist(self, mocker, runner): + mocker.patch.object(Path, "is_file", return_value=False) + result = runner.invoke(config_command_group, ["upload", "fake_config.yaml"]) + assert "Configuration file 'fake_config.yaml' not found." in result.stdout + assert result.exit_code == 1 diff --git a/tests/commands/test_context.py b/tests/commands/test_context.py new file mode 100644 index 0000000000..03cad5b547 --- /dev/null +++ b/tests/commands/test_context.py @@ -0,0 +1,214 @@ +from data_safe_haven.commands.context import context_command_group + + +class TestShow: + def test_show(self, runner): + result = runner.invoke(context_command_group, ["show"]) + assert result.exit_code == 0 + assert "Current context: acmedeployment" in result.stdout + assert "Description: Acme Deployment" in result.stdout + + def test_show_none(self, runner_none): + result = runner_none.invoke(context_command_group, ["show"]) + assert result.exit_code == 0 + assert "Current context: None" in result.stdout + + def test_no_context_file(self, runner_no_context_file): + result = runner_no_context_file.invoke(context_command_group, ["show"]) + assert result.exit_code == 1 + assert "No context configuration file." 
in result.stdout + + +class TestAvailable: + def test_available(self, runner): + result = runner.invoke(context_command_group, ["available"]) + assert result.exit_code == 0 + assert "acmedeployment*" in result.stdout + assert "gems" in result.stdout + + def test_available_none(self, runner_none): + result = runner_none.invoke(context_command_group, ["available"]) + assert result.exit_code == 0 + assert "acmedeployment" in result.stdout + assert "gems" in result.stdout + + def test_no_context_file(self, runner_no_context_file): + result = runner_no_context_file.invoke(context_command_group, ["available"]) + assert result.exit_code == 1 + assert "No context configuration file." in result.stdout + + +class TestSwitch: + def test_switch(self, runner): + result = runner.invoke(context_command_group, ["switch", "gems"]) + assert result.exit_code == 0 + assert "Switched context to 'gems'." in result.stdout + result = runner.invoke(context_command_group, ["available"]) + assert result.exit_code == 0 + assert "gems*" in result.stdout + + def test_invalid_switch(self, runner): + result = runner.invoke(context_command_group, ["switch", "invalid"]) + assert result.exit_code == 1 + assert "Context 'invalid' is not defined." in result.stdout + + def test_no_context_file(self, runner_no_context_file): + result = runner_no_context_file.invoke( + context_command_group, ["switch", "context"] + ) + assert result.exit_code == 1 + assert "No context configuration file." in result.stdout + + +class TestAdd: + def test_add(self, runner): + result = runner.invoke( + context_command_group, + [ + "add", + "--name", + "example", + "--admin-group-name", + "Example Admins", + "--description", + "Example Deployment", + "--subscription-name", + "Data Safe Haven Example", + ], + ) + assert result.exit_code == 0 + result = runner.invoke(context_command_group, ["switch", "example"]) + assert result.exit_code == 0 + + def test_add_duplicate(self, runner): + result = runner.invoke( + context_command_group, + [ + "add", + "--admin-group-name", + "Example Admins", + "--description", + "Acme Deployment", + "--name", + "acmedeployment", + "--subscription-name", + "Data Safe Haven Acme", + ], + ) + assert result.exit_code == 1 + assert ( + "A context with name 'acmedeployment' is already defined." 
in result.stdout + ) + + def test_add_invalid_entra_group_name(self, runner): + result = runner.invoke( + context_command_group, + [ + "add", + "--admin-group-name", + " Example Admins", + "--description", + "Acme Deployment", + "--name", + "acmedeployment", + "--subscription-name", + "Invalid Subscription Name ", + ], + ) + assert result.exit_code == 2 + assert "Invalid value for '--admin-group-name':" in result.stderr + + def test_add_invalid_subscription_name(self, runner): + result = runner.invoke( + context_command_group, + [ + "add", + "--admin-group-name", + "Example Admins", + "--description", + "Example Deployment", + "--name", + "example", + "--subscription-name", + "Invalid Subscription Name ^$", + ], + ) + assert result.exit_code == 2 + assert "Invalid value for '--subscription-name':" in result.stderr + + def test_add_missing_ags(self, runner): + result = runner.invoke( + context_command_group, + [ + "add", + "--name", + "example", + ], + ) + assert result.exit_code == 2 + assert "Missing option" in result.stderr + + def test_add_bootstrap(self, tmp_contexts, runner): + (tmp_contexts / "contexts.yaml").unlink() + result = runner.invoke( + context_command_group, + [ + "add", + "--admin-group-name", + "Acme Admins", + "--description", + "Acme Deployment", + "--name", + "acmedeployment", + "--subscription-name", + "Data Safe Haven Acme", + ], + ) + assert result.exit_code == 0 + assert (tmp_contexts / "contexts.yaml").exists() + result = runner.invoke(context_command_group, ["show"]) + assert result.exit_code == 0 + assert "Description: Acme Deployment" in result.stdout + result = runner.invoke(context_command_group, ["available"]) + assert result.exit_code == 0 + assert "acmedeployment*" in result.stdout + assert "gems" not in result.stdout + + +class TestUpdate: + def test_update(self, runner): + result = runner.invoke( + context_command_group, ["update", "--description", "New Name"] + ) + assert result.exit_code == 0 + result = runner.invoke(context_command_group, ["show"]) + assert result.exit_code == 0 + assert "Description: New Name" in result.stdout + + def test_no_context_file(self, runner_no_context_file): + result = runner_no_context_file.invoke( + context_command_group, ["update", "--description", "New Name"] + ) + assert result.exit_code == 1 + assert "No context configuration file." in result.stdout + + +class TestRemove: + def test_remove(self, runner): + result = runner.invoke(context_command_group, ["remove", "gems"]) + assert result.exit_code == 0 + result = runner.invoke(context_command_group, ["available"]) + assert result.exit_code == 0 + assert "gems" not in result.stdout + + def test_remove_invalid(self, runner): + result = runner.invoke(context_command_group, ["remove", "invalid"]) + assert result.exit_code == 1 + assert "No context with name 'invalid'." in result.stdout + + def test_no_context_file(self, runner_no_context_file): + result = runner_no_context_file.invoke( + context_command_group, ["remove", "gems"] + ) + assert result.exit_code == 1 + assert "No context configuration file." 
in result.stdout diff --git a/tests/commands/test_pulumi.py b/tests/commands/test_pulumi.py new file mode 100644 index 0000000000..fefb4615fc --- /dev/null +++ b/tests/commands/test_pulumi.py @@ -0,0 +1,61 @@ +from data_safe_haven.commands.pulumi import pulumi_command_group + + +class TestRun: + def test_run_sre( + self, + runner, + local_project_settings, # noqa: ARG002 + mock_graph_api_token, # noqa: ARG002 + mock_install_plugins, # noqa: ARG002 + mock_key_vault_key, # noqa: ARG002 + mock_pulumi_config_no_key_from_remote, # noqa: ARG002 + mock_shm_config_from_remote, # noqa: ARG002 + mock_sre_config_from_remote, # noqa: ARG002 + offline_pulumi_account, # noqa: ARG002 + ): + result = runner.invoke(pulumi_command_group, ["sandbox", "stack ls"]) + assert result.exit_code == 0 + assert "shm-acmedeployment-sre-sandbox*" in result.stdout + + def test_run_sre_incorrect_arguments( + self, + runner, + ): + result = runner.invoke(pulumi_command_group, ["stack ls"]) + assert result.exit_code == 2 + assert "Usage: run [OPTIONS] SRE_NAME COMMAND" in result.stderr + + def test_run_sre_invalid_command( + self, + runner, + local_project_settings, # noqa: ARG002 + mock_graph_api_token, # noqa: ARG002 + mock_install_plugins, # noqa: ARG002 + mock_key_vault_key, # noqa: ARG002 + mock_pulumi_config_no_key_from_remote, # noqa: ARG002 + mock_shm_config_from_remote, # noqa: ARG002 + mock_sre_config_from_remote, # noqa: ARG002 + offline_pulumi_account, # noqa: ARG002 + ): + result = runner.invoke( + pulumi_command_group, ["sandbox", "not a pulumi command"] + ) + assert result.exit_code == 1 + assert "Failed to run command 'not a pulumi command'." in result.stdout + + def test_run_sre_invalid_name( + self, + runner, + local_project_settings, # noqa: ARG002 + mock_graph_api_token, # noqa: ARG002 + mock_install_plugins, # noqa: ARG002 + mock_key_vault_key, # noqa: ARG002 + mock_pulumi_config_no_key_from_remote, # noqa: ARG002 + mock_shm_config_from_remote, # noqa: ARG002 + mock_sre_config_alternate_from_remote, # noqa: ARG002 + offline_pulumi_account, # noqa: ARG002 + ): + result = runner.invoke(pulumi_command_group, ["alternate", "stack ls"]) + assert result.exit_code == 1 + assert "No SRE named alternative is defined" in result.stdout diff --git a/tests/commands/test_shm.py b/tests/commands/test_shm.py new file mode 100644 index 0000000000..8258d2a16a --- /dev/null +++ b/tests/commands/test_shm.py @@ -0,0 +1,71 @@ +from data_safe_haven.commands.shm import shm_command_group + + +class TestDeploySHM: + def test_infrastructure_deploy( + self, + runner, + mock_imperative_shm_deploy_then_exit, # noqa: ARG002 + mock_graph_api_add_custom_domain, # noqa: ARG002 + mock_graph_api_token, # noqa: ARG002 + mock_shm_config_from_remote, # noqa: ARG002 + mock_shm_config_remote_exists, # noqa: ARG002 + mock_shm_config_upload, # noqa: ARG002 + ): + result = runner.invoke(shm_command_group, ["deploy"]) + assert result.exit_code == 1 + assert "mock deploy" in result.stdout + assert "mock deploy error" in result.stdout + + def test_infrastructure_no_context_file(self, runner_no_context_file): + result = runner_no_context_file.invoke(shm_command_group, ["deploy"]) + assert result.exit_code == 1 + assert "No context configuration file." in result.stdout + + def test_infrastructure_show_none(self, runner_none): + result = runner_none.invoke(shm_command_group, ["deploy"]) + assert result.exit_code == 1 + assert "No context selected." 
in result.stdout + + def test_infrastructure_auth_failure( + self, + runner, + mock_azuresdk_get_credential_failure, # noqa: ARG002 + ): + result = runner.invoke(shm_command_group, ["deploy"]) + assert result.exit_code == 1 + assert "mock get_credential\n" in result.stdout + assert "mock get_credential error" in result.stdout + + +class TestTeardownSHM: + def test_teardown( + self, + runner, + mock_imperative_shm_teardown_then_exit, # noqa: ARG002 + mock_shm_config_from_remote, # noqa: ARG002 + ): + result = runner.invoke(shm_command_group, ["teardown"]) + assert result.exit_code == 1 + assert "mock teardown" in result.stdout + + def test_no_context_file(self, runner_no_context_file): + result = runner_no_context_file.invoke(shm_command_group, ["teardown"]) + assert result.exit_code == 1 + assert "No context configuration file." in result.stdout + + def test_show_none(self, runner_none): + result = runner_none.invoke(shm_command_group, ["teardown"]) + assert result.exit_code == 1 + assert "No context selected." in result.stdout + + def test_auth_failure( + self, + runner, + mock_azuresdk_get_credential_failure, # noqa: ARG002 + ): + result = runner.invoke(shm_command_group, ["teardown"]) + assert result.exit_code == 1 + assert "mock get_credential\n" in result.stdout + assert "mock get_credential error" in result.stdout + assert "Could not teardown Safe Haven Management environment." in result.stdout diff --git a/tests/commands/test_sre.py b/tests/commands/test_sre.py new file mode 100644 index 0000000000..dd6a9fba05 --- /dev/null +++ b/tests/commands/test_sre.py @@ -0,0 +1,89 @@ +from data_safe_haven.commands.sre import sre_command_group + + +class TestDeploySRE: + def test_deploy( + self, + runner, + mock_graph_api_token, # noqa: ARG002 + mock_ip_1_2_3_4, # noqa: ARG002 + mock_pulumi_config_from_remote_or_create, # noqa: ARG002 + mock_pulumi_config_upload, # noqa: ARG002 + mock_shm_config_from_remote, # noqa: ARG002 + mock_sre_config_from_remote, # noqa: ARG002 + mock_sre_project_manager_deploy_then_exit, # noqa: ARG002 + ): + result = runner.invoke(sre_command_group, ["deploy", "sandbox"]) + assert result.exit_code == 1 + assert "mock deploy" in result.stdout + assert "mock deploy error" in result.stdout + + def test_no_context_file(self, runner_no_context_file): + result = runner_no_context_file.invoke(sre_command_group, ["deploy", "sandbox"]) + assert result.exit_code == 1 + assert "Could not find file" in result.stdout + + def test_auth_failure( + self, + runner, + mock_azuresdk_get_credential_failure, # noqa: ARG002 + ): + result = runner.invoke(sre_command_group, ["deploy", "sandbox"]) + assert result.exit_code == 1 + assert "mock get_credential\n" in result.stdout + assert "mock get_credential error" in result.stdout + + def test_no_shm( + self, + capfd, + runner, + mock_shm_config_from_remote_fails, # noqa: ARG002 + ): + result = runner.invoke(sre_command_group, ["deploy", "sandbox"]) + out, _ = capfd.readouterr() + assert result.exit_code == 1 + assert "mock from_remote failure" in out + + +class TestTeardownSRE: + def test_teardown( + self, + runner, + mock_graph_api_token, # noqa: ARG002 + mock_ip_1_2_3_4, # noqa: ARG002 + mock_pulumi_config_from_remote, # noqa: ARG002 + mock_shm_config_from_remote, # noqa: ARG002 + mock_sre_config_from_remote, # noqa: ARG002 + mock_sre_project_manager_teardown_then_exit, # noqa: ARG002 + ): + result = runner.invoke(sre_command_group, ["teardown", "sandbox"]) + assert result.exit_code == 1 + assert "mock teardown" in result.stdout + + def 
test_no_context_file(self, runner_no_context_file): + result = runner_no_context_file.invoke( + sre_command_group, ["teardown", "sandbox"] + ) + assert result.exit_code == 1 + assert "Could not find file" in result.stdout + + def test_no_shm( + self, + capfd, + runner, + mock_shm_config_from_remote_fails, # noqa: ARG002 + ): + result = runner.invoke(sre_command_group, ["teardown", "sandbox"]) + out, _ = capfd.readouterr() + assert result.exit_code == 1 + assert "mock from_remote failure" in out + + def test_auth_failure( + self, + runner, + mock_azuresdk_get_credential_failure, # noqa: ARG002 + ): + result = runner.invoke(sre_command_group, ["teardown", "sandbox"]) + assert result.exit_code == 1 + assert "mock get_credential\n" in result.stdout + assert "mock get_credential error" in result.stdout diff --git a/tests/commands/test_users.py b/tests/commands/test_users.py new file mode 100644 index 0000000000..a9feb25ee2 --- /dev/null +++ b/tests/commands/test_users.py @@ -0,0 +1,100 @@ +from data_safe_haven.commands.users import users_command_group + + +class TestAdd: + def test_invalid_shm( + self, + mock_shm_config_from_remote_fails, # noqa: ARG002 + runner, + tmp_contexts_gems, # noqa: ARG002 + ): + result = runner.invoke(users_command_group, ["add", "users.csv"]) + + assert result.exit_code == 1 + assert "Have you deployed the SHM?" in result.stdout + + +class TestListUsers: + def test_invalid_shm( + self, + mock_shm_config_from_remote_fails, # noqa: ARG002 + runner, + tmp_contexts_gems, # noqa: ARG002 + ): + result = runner.invoke(users_command_group, ["list", "my_sre"]) + + assert result.exit_code == 1 + assert "Have you deployed the SHM?" in result.stdout + + +class TestRegister: + def test_invalid_shm( + self, + mock_shm_config_from_remote_fails, # noqa: ARG002 + runner, + tmp_contexts_gems, # noqa: ARG002 + ): + result = runner.invoke( + users_command_group, ["register", "-u", "Harry Lime", "my_sre"] + ) + + assert result.exit_code == 1 + assert "Have you deployed the SHM?" in result.stdout + + def test_invalid_sre( + self, + mock_pulumi_config_from_remote, # noqa: ARG002 + mock_shm_config_from_remote, # noqa: ARG002 + mock_sre_config_from_remote, # noqa: ARG002 + runner, + tmp_contexts, # noqa: ARG002 + ): + result = runner.invoke( + users_command_group, ["register", "-u", "Harry Lime", "my_sre"] + ) + + assert result.exit_code == 1 + assert "Have you deployed the SRE?" in result.stdout + + +class TestRemove: + def test_invalid_shm( + self, + mock_shm_config_from_remote_fails, # noqa: ARG002 + runner, + tmp_contexts_gems, # noqa: ARG002 + ): + result = runner.invoke(users_command_group, ["remove", "-u", "Harry Lime"]) + + assert result.exit_code == 1 + assert "Have you deployed the SHM?" in result.stdout + + +class TestUnregister: + def test_invalid_shm( + self, + mock_shm_config_from_remote_fails, # noqa: ARG002 + runner, + tmp_contexts_gems, # noqa: ARG002 + ): + result = runner.invoke( + users_command_group, ["unregister", "-u", "Harry Lime", "my_sre"] + ) + + assert result.exit_code == 1 + assert "Have you deployed the SHM?" in result.stdout + + def test_invalid_sre( + self, + mock_pulumi_config_from_remote, # noqa: ARG002 + mock_shm_config_from_remote, # noqa: ARG002 + mock_sre_config_from_remote, # noqa: ARG002 + runner, + tmp_contexts, # noqa: ARG002 + ): + result = runner.invoke( + users_command_group, ["unregister", "-u", "Harry Lime", "my_sre"] + ) + + assert result.exit_code == 1 + assert "Have you deployed the SRE?" 
in result.stdout diff --git a/tests/config/test_config_sections.py b/tests/config/test_config_sections.py new file mode 100644 index 0000000000..2cbcf07655 --- /dev/null +++ b/tests/config/test_config_sections.py @@ -0,0 +1,176 @@ +import pytest +from pydantic import ValidationError + +from data_safe_haven.config.config_sections import ( + ConfigSectionAzure, + ConfigSectionDockerHub, + ConfigSectionSHM, + ConfigSectionSRE, + ConfigSubsectionRemoteDesktopOpts, +) +from data_safe_haven.types import DatabaseSystem, SoftwarePackageCategory + + +class TestConfigSectionAzure: + def test_constructor(self) -> None: + ConfigSectionAzure( + location="uksouth", + subscription_id="d5c5c439-1115-4cb6-ab50-b8e547b6c8dd", + tenant_id="d5c5c439-1115-4cb6-ab50-b8e547b6c8dd", + ) + + def test_invalid_location(self): + with pytest.raises( + ValidationError, match="Value error, Expected valid Azure location" + ): + ConfigSectionAzure( + location="not_a_location", + subscription_id="d5c5c439-1115-4cb6-ab50-b8e547b6c8dd", + tenant_id="d5c5c439-1115-4cb6-ab50-b8e547b6c8dd", + ) + + def test_invalid_subscription_id(self): + with pytest.raises( + ValidationError, + match=r"1 validation error for ConfigSectionAzure\nsubscription_id\n Value error, Expected GUID", + ): + ConfigSectionAzure( + location="uksouth", + subscription_id="not_a_guid", + tenant_id="d5c5c439-1115-4cb6-ab50-b8e547b6c8dd", + ) + + def test_invalid_tenant_id(self): + with pytest.raises( + ValidationError, + match=r"1 validation error for ConfigSectionAzure\ntenant_id\n Value error, Expected GUID", + ): + ConfigSectionAzure( + location="uksouth", + subscription_id="d5c5c439-1115-4cb6-ab50-b8e547b6c8dd", + tenant_id="not_a_guid", + ) + + +class TestConfigSectionDockerHub: + def test_constructor(self) -> None: + ConfigSectionDockerHub( + access_token="dummytoken", + username="exampleuser", + ) + + def test_invalid_access_token(self): + with pytest.raises( + ValidationError, + match="Value error, Expected valid string containing only letters, numbers, hyphens and underscores.", + ): + ConfigSectionDockerHub( + access_token="not a valid access token", + username="exampleuser", + ) + + def test_invalid_username(self): + with pytest.raises( + ValidationError, + match="Value error, Expected valid string containing only letters, numbers, hyphens and underscores.", + ): + ConfigSectionDockerHub( + access_token="dummytoken", + username="not a valid username", + ) + + +class TestConfigSectionSHM: + def test_constructor(self, config_section_shm_dict) -> None: + ConfigSectionSHM(**config_section_shm_dict) + + def test_invalid_admin_group_id(self, config_section_shm_dict): + config_section_shm_dict["admin_group_id"] = "not a guid" + with pytest.raises( + ValidationError, + match=r"1 validation error for ConfigSectionSHM\nadmin_group_id\n Value error, Expected GUID", + ): + ConfigSectionSHM(**config_section_shm_dict) + + def test_invalid_entra_tenant_id(self, config_section_shm_dict): + config_section_shm_dict["entra_tenant_id"] = "not a guid" + with pytest.raises( + ValidationError, + match=r"1 validation error for ConfigSectionSHM\nentra_tenant_id\n Value error, Expected GUID", + ): + ConfigSectionSHM(**config_section_shm_dict) + + def test_invalid_fqdn(self, config_section_shm_dict): + config_section_shm_dict["fqdn"] = "not a domain" + with pytest.raises( + ValidationError, + match=r"1 validation error for ConfigSectionSHM\nfqdn\n Value error, Expected valid fully qualified domain name", + ): + ConfigSectionSHM(**config_section_shm_dict) + + +class 
TestConfigSectionSRE: + def test_constructor( + self, remote_desktop_config: ConfigSubsectionRemoteDesktopOpts + ) -> None: + sre_config = ConfigSectionSRE( + admin_email_address="admin@example.com", + admin_ip_addresses=["1.2.3.4"], + databases=[DatabaseSystem.POSTGRESQL], + data_provider_ip_addresses=["2.3.4.5"], + remote_desktop=remote_desktop_config, + workspace_skus=["Standard_D2s_v4"], + research_user_ip_addresses=["3.4.5.6"], + software_packages=SoftwarePackageCategory.ANY, + timezone="Australia/Perth", + ) + assert sre_config.admin_email_address == "admin@example.com" + assert sre_config.admin_ip_addresses[0] == "1.2.3.4/32" + assert sre_config.databases[0] == DatabaseSystem.POSTGRESQL + assert sre_config.data_provider_ip_addresses[0] == "2.3.4.5/32" + assert sre_config.remote_desktop == remote_desktop_config + assert sre_config.workspace_skus[0] == "Standard_D2s_v4" + assert sre_config.research_user_ip_addresses[0] == "3.4.5.6/32" + assert sre_config.software_packages == SoftwarePackageCategory.ANY + assert sre_config.timezone == "Australia/Perth" + + def test_constructor_defaults( + self, remote_desktop_config: ConfigSubsectionRemoteDesktopOpts + ) -> None: + sre_config = ConfigSectionSRE(admin_email_address="admin@example.com") + assert sre_config.admin_email_address == "admin@example.com" + assert sre_config.admin_ip_addresses == [] + assert sre_config.databases == [] + assert sre_config.data_provider_ip_addresses == [] + assert sre_config.remote_desktop == remote_desktop_config + assert sre_config.workspace_skus == [] + assert sre_config.research_user_ip_addresses == [] + assert sre_config.software_packages == SoftwarePackageCategory.NONE + assert sre_config.timezone == "Etc/UTC" + + def test_all_databases_must_be_unique(self) -> None: + with pytest.raises(ValueError, match=r"All items must be unique."): + ConfigSectionSRE( + databases=[DatabaseSystem.POSTGRESQL, DatabaseSystem.POSTGRESQL], + ) + + +class TestConfigSubsectionRemoteDesktopOpts: + def test_constructor(self) -> None: + ConfigSubsectionRemoteDesktopOpts(allow_copy=True, allow_paste=True) + + def test_constructor_defaults(self) -> None: + remote_desktop_config = ConfigSubsectionRemoteDesktopOpts() + assert not all( + (remote_desktop_config.allow_copy, remote_desktop_config.allow_paste) + ) + + def test_constructor_invalid_allow_copy(self) -> None: + with pytest.raises( + ValueError, + match=r"1 validation error for ConfigSubsectionRemoteDesktopOpts\nallow_paste\n Input should be a valid boolean", + ): + ConfigSubsectionRemoteDesktopOpts( + allow_copy=True, + allow_paste="not a bool", + ) diff --git a/tests/config/test_context_manager.py b/tests/config/test_context_manager.py new file mode 100644 index 0000000000..1118d1bbb3 --- /dev/null +++ b/tests/config/test_context_manager.py @@ -0,0 +1,284 @@ +import pytest +import yaml +from pydantic import ValidationError + +from data_safe_haven.config import Context, ContextManager +from data_safe_haven.exceptions import ( + DataSafeHavenConfigError, + DataSafeHavenTypeError, + DataSafeHavenValueError, +) +from data_safe_haven.version import __version__ + + +class TestContext: + def test_constructor(self, context_dict): + context = Context(**context_dict) + assert isinstance(context, Context) + assert all( + getattr(context, item) == context_dict[item] for item in context_dict.keys() + ) + assert context.storage_container_name == "config" + assert context.pulumi_storage_container_name == "pulumi" + assert context.pulumi_encryption_key_name == "pulumi-encryption-key" + + def 
test_invalid_subscription_name(self, context_dict): + context_dict["subscription_name"] = "very " * 15 + "long name" + with pytest.raises( + ValidationError, match="String should have at most 80 characters" + ): + Context(**context_dict) + + def test_tags(self, context): + assert context.tags["description"] == "Acme Deployment" + assert context.tags["project"] == "Data Safe Haven" + assert context.tags["shm_name"] == "acmedeployment" + assert context.tags["version"] == __version__ + + def test_name(self, context): + assert context.name == "acmedeployment" + + def test_work_directory(self, context, monkeypatch): + monkeypatch.delenv("DSH_CONFIG_DIRECTORY", raising=False) + assert "data_safe_haven/acmedeployment" in str(context.work_directory) + + def test_resource_group_name(self, context): + assert context.resource_group_name == "shm-acmedeployment-rg" + + def test_storage_account_name(self, context): + assert context.storage_account_name == "shmacmedeployment" + + def test_long_storage_account_name(self, context_dict): + context_dict["name"] = "very" * 5 + "longname" + context = Context(**context_dict) + assert context.storage_account_name == "shmveryveryveryveryveryl" + + def test_key_vault_name(self, context): + assert context.key_vault_name == "shm-acmedeployment-kv" + + def test_managed_identity_name(self, context): + assert context.managed_identity_name == "shm-acmedeployment-identity-reader" + + def test_pulumi_backend_url(self, context): + assert context.pulumi_backend_url == "azblob://pulumi" + + def test_pulumi_encryption_key(self, context, mock_key_vault_key): # noqa: ARG002 + key = context.pulumi_encryption_key + assert key.key_name == context.pulumi_encryption_key_name + assert key.key_vault_name == context.key_vault_name + + def test_pulumi_encryption_key_version( + self, context, mock_key_vault_key # noqa: ARG002 + ): + version = context.pulumi_encryption_key_version + assert version == "version" + + def test_pulumi_secrets_provider_url( + self, context, mock_key_vault_key # noqa: ARG002 + ): + assert ( + context.pulumi_secrets_provider_url + == "azurekeyvault://shm-acmedeployment-kv.vault.azure.net/keys/pulumi-encryption-key/version" + ) + + +class TestContextManager: + def test_constructor(self): + settings = ContextManager( + selected="acmedeployment", + contexts={ + "acmedeployment": Context( + admin_group_name="Acme Admins", + description="Acme Deployment", + name="acmedeployment", + subscription_name="Data Safe Haven Acme", + ) + }, + ) + assert isinstance(settings, ContextManager) + + def test_null_selected(self, context_yaml): + context_yaml = context_yaml.replace( + "selected: acmedeployment", "selected: null" + ) + + settings = ContextManager.from_yaml(context_yaml) + assert settings.selected is None + assert settings.context is None + with pytest.raises(DataSafeHavenConfigError, match="No context selected"): + settings.assert_context() + + def test_missing_selected(self, context_yaml): + context_yaml = "\n".join( + [line for line in context_yaml.splitlines() if "selected:" not in line] + ) + with pytest.raises( + DataSafeHavenTypeError, + match="Could not load ContextManager configuration.", + ): + ContextManager.from_yaml(context_yaml) + + def test_invalid_selected_input(self, context_yaml): + context_yaml = context_yaml.replace( + "selected: acmedeployment", "selected: invalid" + ) + with pytest.raises( + DataSafeHavenTypeError, + match="Could not load ContextManager configuration.", + ): + ContextManager.from_yaml(context_yaml) + + def test_invalid_yaml(self): 
+ invalid_yaml = "a: [1,2" + with pytest.raises( + DataSafeHavenConfigError, + match="Could not parse ContextManager configuration as YAML.", + ): + ContextManager.from_yaml(invalid_yaml) + + def test_yaml_not_dict(self): + not_dict = "[1, 2, 3]" + with pytest.raises( + DataSafeHavenConfigError, + match="Unable to parse ContextManager configuration as a dict.", + ): + ContextManager.from_yaml(not_dict) + + def test_selected(self, context_manager): + assert context_manager.selected == "acmedeployment" + + def test_set_selected(self, context_manager): + assert context_manager.selected == "acmedeployment" + context_manager.selected = "gems" + assert context_manager.selected == "gems" + + def test_invalid_selected(self, context_manager): + with pytest.raises( + DataSafeHavenValueError, match="Context 'invalid' is not defined." + ): + context_manager.selected = "invalid" + + def test_context(self, context_yaml, context_manager): + yaml_dict = yaml.safe_load(context_yaml) + assert isinstance(context_manager.context, Context) + assert all( + getattr(context_manager.context, item) + == yaml_dict["contexts"]["acmedeployment"][item] + for item in yaml_dict["contexts"]["acmedeployment"].keys() + ) + + def test_set_context(self, context_yaml, context_manager): + yaml_dict = yaml.safe_load(context_yaml) + context_manager.selected = "gems" + assert isinstance(context_manager.context, Context) + assert all( + getattr(context_manager.context, item) + == yaml_dict["contexts"]["gems"][item] + for item in yaml_dict["contexts"]["gems"].keys() + ) + + def test_set_context_none(self, context_manager): + context_manager.selected = None + assert context_manager.selected is None + assert context_manager.context is None + + def test_assert_context(self, context_manager): + context = context_manager.assert_context() + assert context.description == "Acme Deployment" + assert context.name == "acmedeployment" + + def test_assert_context_none(self, context_manager): + context_manager.selected = None + with pytest.raises(DataSafeHavenConfigError, match="No context selected"): + context_manager.assert_context() + + def test_available(self, context_manager): + available = context_manager.available + assert isinstance(available, list) + assert all(isinstance(item, str) for item in available) + assert available == ["acmedeployment", "gems"] + + def test_update(self, context_manager): + assert context_manager.context.description == "Acme Deployment" + assert context_manager.context.name == "acmedeployment" + context_manager.update(name="replaced") + assert context_manager.context.name == "replaced" + + def test_set_update(self, context_manager): + context_manager.selected = "gems" + assert context_manager.context.description == "Gems" + assert context_manager.context.name == "gems" + context_manager.update(name="replaced") + assert context_manager.context.name == "replaced" + + def test_update_none(self, context_manager): + context_manager.selected = None + with pytest.raises(DataSafeHavenConfigError, match="No context selected"): + context_manager.update(name="replaced") + + def test_add(self, context_manager): + context_manager.add( + admin_group_name="Example Admins", + description="Example Deployment", + name="example", + subscription_name="Data Safe Haven Example", + ) + context_manager.selected = "example" + assert context_manager.selected == "example" + assert context_manager.context.description == "Example Deployment" + assert context_manager.context.name == "example" + assert context_manager.context.subscription_name 
== "Data Safe Haven Example" + + def test_invalid_add(self, context_manager): + with pytest.raises( + DataSafeHavenValueError, + match="A context with name 'acmedeployment' is already defined.", + ): + context_manager.add( + admin_group_name="Acme Admins", + description="Acme Deployment", + name="acmedeployment", + subscription_name="Data Safe Haven Acme", + ) + + def test_remove(self, context_manager): + context_manager.remove("gems") + assert "gems" not in context_manager.available + assert context_manager.selected == "acmedeployment" + + def test_invalid_remove(self, context_manager): + with pytest.raises( + DataSafeHavenValueError, match="No context with name 'invalid'." + ): + context_manager.remove("invalid") + + def test_remove_selected(self, context_manager): + context_manager.remove("acmedeployment") + assert "acmedeployment" not in context_manager.available + assert context_manager.selected is None + + def test_from_file(self, tmp_path, context_yaml): + config_file_path = tmp_path / "config.yaml" + with open(config_file_path, "w") as f: + f.write(context_yaml) + settings = ContextManager.from_file(config_file_path=config_file_path) + assert settings.context.description == "Acme Deployment" + assert settings.context.name == "acmedeployment" + + def test_file_not_found(self, tmp_path): + config_file_path = tmp_path / "config.yaml" + with pytest.raises(DataSafeHavenConfigError, match="Could not find file"): + ContextManager.from_file(config_file_path=config_file_path) + + def test_write(self, tmp_path, context_yaml): + config_file_path = tmp_path / "config.yaml" + with open(config_file_path, "w") as f: + f.write(context_yaml) + settings = ContextManager.from_file(config_file_path=config_file_path) + settings.selected = "gems" + settings.update(name="replaced") + settings.write(config_file_path) + with open(config_file_path) as f: + context_dict = yaml.safe_load(f) + assert context_dict["selected"] == "replaced" + assert context_dict["contexts"]["replaced"]["name"] == "replaced" diff --git a/tests/config/test_pulumi.py b/tests/config/test_pulumi.py new file mode 100644 index 0000000000..3bb57b1a38 --- /dev/null +++ b/tests/config/test_pulumi.py @@ -0,0 +1,213 @@ +from pytest import raises + +from data_safe_haven.config import DSHPulumiConfig, DSHPulumiProject +from data_safe_haven.exceptions import ( + DataSafeHavenConfigError, + DataSafeHavenTypeError, +) +from data_safe_haven.external import AzureSdk + + +class TestDSHPulumiProject: + def test_pulumi_project(self, pulumi_project): + assert isinstance(pulumi_project.stack_config, dict) + assert "azure-native:location" in pulumi_project.stack_config.keys() + assert pulumi_project.stack_config.get("azure-native:location") == "uksouth" + + def test_dump(self, pulumi_project, pulumi_project_stack_config): + d = pulumi_project.model_dump() + assert d.get("stack_config") == pulumi_project_stack_config + + def test_eq(self, pulumi_project): + assert pulumi_project == pulumi_project.model_copy(deep=True) + + def test_not_eq(self, pulumi_project, pulumi_project_other): + assert pulumi_project != pulumi_project_other + + +class TestDSHPulumiConfig: + def test_pulumi_config(self, pulumi_project): + config = DSHPulumiConfig( + encrypted_key="NZVaEDfeuIPR7N8Dwnpx", + projects={"acmedeployment": pulumi_project}, + ) + assert config.projects["acmedeployment"] == pulumi_project + assert isinstance(config.encrypted_key, str) + assert config.encrypted_key == "NZVaEDfeuIPR7N8Dwnpx" + + def test_getitem(self, pulumi_config, pulumi_project, 
pulumi_project_other): + assert pulumi_config["acmedeployment"] == pulumi_project + assert pulumi_config["other_project"] == pulumi_project_other + + def test_getitem_type_error(self, pulumi_config): + with raises(TypeError, match="'key' must be a string."): + pulumi_config[0] + + def test_getitem_index_error(self, pulumi_config): + with raises(KeyError, match="No configuration for DSH Pulumi Project Ringo."): + pulumi_config["Ringo"] + + def test_delitem(self, pulumi_config): + assert len(pulumi_config.projects) == 2 + del pulumi_config["acmedeployment"] + assert len(pulumi_config.projects) == 1 + + def test_delitem_value_error(self, pulumi_config): + with raises(TypeError, match="'key' must be a string."): + del pulumi_config[-1] + + def test_delitem_index_error(self, pulumi_config): + with raises(KeyError, match="No configuration for DSH Pulumi Project Ringo."): + del pulumi_config["Ringo"] + + def test_setitem(self, pulumi_config, pulumi_project): + del pulumi_config["acmedeployment"] + assert len(pulumi_config.project_names) == 1 + assert "acmedeployment" not in pulumi_config.project_names + pulumi_config["acmedeployment"] = pulumi_project + assert len(pulumi_config.project_names) == 2 + assert "acmedeployment" in pulumi_config.project_names + + def test_setitem_type_error(self, pulumi_config): + with raises(TypeError, match="'key' must be a string."): + pulumi_config[1] = 5 + + def test_setitem_value_error(self, pulumi_config): + with raises(ValueError, match="Stack other_project already exists."): + pulumi_config["other_project"] = 5 + + def test_project_names(self, pulumi_config): + assert "acmedeployment" in pulumi_config.project_names + + def test_to_yaml(self, pulumi_config): + yaml = pulumi_config.to_yaml() + assert isinstance(yaml, str) + assert "projects:" in yaml + assert "stack_config:" in yaml + assert "azure-native:location: uksouth" in yaml + + def test_from_yaml(self, pulumi_config_yaml): + pulumi_config = DSHPulumiConfig.from_yaml(pulumi_config_yaml) + assert len(pulumi_config.projects) == 2 + assert "acmedeployment" in pulumi_config.project_names + assert isinstance(pulumi_config["acmedeployment"], DSHPulumiProject) + assert "other_project" in pulumi_config.project_names + assert isinstance(pulumi_config["other_project"], DSHPulumiProject) + assert ( + pulumi_config["acmedeployment"].stack_config.get("data-safe-haven:variable") + == 5 + ) + + def test_from_yaml_invalid_yaml(self): + with raises( + DataSafeHavenConfigError, + match="Could not parse Pulumi configuration as YAML.", + ): + DSHPulumiConfig.from_yaml("a: [1,2") + + def test_from_yaml_not_dict(self): + with raises( + DataSafeHavenConfigError, + match="Unable to parse Pulumi configuration as a dict.", + ): + DSHPulumiConfig.from_yaml("5") + + def test_from_yaml_validation_error(self): + not_valid = "projects: -3" + with raises( + DataSafeHavenTypeError, match="Could not load Pulumi configuration." 
+ ): + DSHPulumiConfig.from_yaml(not_valid) + + def test_upload(self, mocker, pulumi_config, context): + mock_method = mocker.patch.object(AzureSdk, "upload_blob", return_value=None) + pulumi_config.upload(context) + + mock_method.assert_called_once_with( + pulumi_config.to_yaml(), + DSHPulumiConfig.default_filename, + context.resource_group_name, + context.storage_account_name, + context.storage_container_name, + ) + + def test_from_remote(self, mocker, pulumi_config_yaml, context): + mock_method = mocker.patch.object( + AzureSdk, "download_blob", return_value=pulumi_config_yaml + ) + pulumi_config = DSHPulumiConfig.from_remote(context) + + assert isinstance(pulumi_config, DSHPulumiConfig) + assert pulumi_config["acmedeployment"] + assert len(pulumi_config.projects) == 2 + + mock_method.assert_called_once_with( + DSHPulumiConfig.default_filename, + context.resource_group_name, + context.storage_account_name, + context.storage_container_name, + ) + + def test_from_remote_or_create( + self, mocker, pulumi_config_yaml, context, mock_storage_exists + ): + mock_exists = mocker.patch.object(AzureSdk, "blob_exists", return_value=True) + mock_download = mocker.patch.object( + AzureSdk, "download_blob", return_value=pulumi_config_yaml + ) + pulumi_config = DSHPulumiConfig.from_remote_or_create(context, projects={}) + + assert isinstance(pulumi_config, DSHPulumiConfig) + assert pulumi_config["acmedeployment"] + assert len(pulumi_config.projects) == 2 + + mock_exists.assert_called_once_with( + DSHPulumiConfig.default_filename, + context.resource_group_name, + context.storage_account_name, + context.storage_container_name, + ) + + mock_download.assert_called_once_with( + DSHPulumiConfig.default_filename, + context.resource_group_name, + context.storage_account_name, + context.storage_container_name, + ) + + mock_storage_exists.assert_called_once_with( + context.storage_account_name, + ) + + def test_from_remote_or_create_create( + self, mocker, pulumi_config_yaml, context, mock_storage_exists # noqa: ARG002 + ): + mock_exists = mocker.patch.object(AzureSdk, "blob_exists", return_value=False) + pulumi_config = DSHPulumiConfig.from_remote_or_create( + context, encrypted_key="abc", projects={} + ) + + assert isinstance(pulumi_config, DSHPulumiConfig) + assert len(pulumi_config.projects) == 0 + + mock_exists.assert_called_once_with( + DSHPulumiConfig.default_filename, + context.resource_group_name, + context.storage_account_name, + context.storage_container_name, + ) + + mock_storage_exists.assert_called_once_with( + context.storage_account_name, + ) + + def test_create_or_select_project(self, pulumi_config, pulumi_project): + assert len(pulumi_config.project_names) == 2 + project = pulumi_config.create_or_select_project("acmedeployment") + assert len(pulumi_config.project_names) == 2 + assert isinstance(project, DSHPulumiProject) + assert project == pulumi_project + project = pulumi_config.create_or_select_project("new_project") + assert len(pulumi_config.project_names) == 3 + assert isinstance(project, DSHPulumiProject) + assert project.stack_config == {} diff --git a/tests/config/test_shm_config.py b/tests/config/test_shm_config.py new file mode 100644 index 0000000000..d42c0c42d4 --- /dev/null +++ b/tests/config/test_shm_config.py @@ -0,0 +1,84 @@ +import pytest +from pydantic import ValidationError + +from data_safe_haven.config import Context, SHMConfig +from data_safe_haven.config.config_sections import ( + ConfigSectionAzure, + ConfigSectionSHM, +) +from data_safe_haven.exceptions import ( + 
DataSafeHavenTypeError, +) +from data_safe_haven.external import AzureSdk + + +class TestConfig: + def test_constructor( + self, + config_section_azure: ConfigSectionAzure, + config_section_shm: ConfigSectionSHM, + ) -> None: + config = SHMConfig( + azure=config_section_azure, + shm=config_section_shm, + ) + assert isinstance(config.azure, ConfigSectionAzure) + assert isinstance(config.shm, ConfigSectionSHM) + + def test_constructor_invalid( + self, config_section_azure: ConfigSectionAzure + ) -> None: + with pytest.raises( + ValidationError, + match=r"1 validation error for SHMConfig\nshm\n Field required.*", + ): + SHMConfig(azure=config_section_azure) + + def test_template(self) -> None: + config = SHMConfig.template() + assert isinstance(config, SHMConfig) + assert ( + config.azure.subscription_id + == "ID of the Azure subscription that the SHM will be deployed to" + ) + + def test_template_validation(self) -> None: + config = SHMConfig.template() + with pytest.raises(DataSafeHavenTypeError): + SHMConfig.from_yaml(config.to_yaml()) + + def test_from_yaml(self, shm_config: SHMConfig, shm_config_yaml: str) -> None: + config = SHMConfig.from_yaml(shm_config_yaml) + assert config == shm_config + assert isinstance(config.shm.fqdn, str) + + def test_from_remote( + self, mocker, context, shm_config: SHMConfig, shm_config_yaml + ) -> None: + mock_method = mocker.patch.object( + AzureSdk, "download_blob", return_value=shm_config_yaml + ) + config = SHMConfig.from_remote(context) + + assert config == shm_config + mock_method.assert_called_once_with( + SHMConfig.default_filename, + context.resource_group_name, + context.storage_account_name, + context.storage_container_name, + ) + + def test_to_yaml(self, shm_config: SHMConfig, shm_config_yaml) -> None: + assert shm_config.to_yaml() == shm_config_yaml + + def test_upload(self, mocker, context: Context, shm_config: SHMConfig) -> None: + mock_method = mocker.patch.object(AzureSdk, "upload_blob", return_value=None) + shm_config.upload(context) + + mock_method.assert_called_once_with( + shm_config.to_yaml(), + SHMConfig.default_filename, + context.resource_group_name, + context.storage_account_name, + context.storage_container_name, + ) diff --git a/tests/config/test_sre_config.py b/tests/config/test_sre_config.py new file mode 100644 index 0000000000..66bd50a40d --- /dev/null +++ b/tests/config/test_sre_config.py @@ -0,0 +1,139 @@ +import pytest +from pydantic import ValidationError + +from data_safe_haven.config import Context, SREConfig +from data_safe_haven.config.config_sections import ( + ConfigSectionAzure, + ConfigSectionDockerHub, + ConfigSectionSRE, +) +from data_safe_haven.config.sre_config import sre_config_name +from data_safe_haven.exceptions import ( + DataSafeHavenTypeError, +) +from data_safe_haven.external import AzureSdk +from data_safe_haven.types import SoftwarePackageCategory + + +class TestConfig: + def test_constructor( + self, + config_section_azure: ConfigSectionAzure, + config_section_dockerhub: ConfigSectionDockerHub, + config_section_sre: ConfigSectionSRE, + ) -> None: + config = SREConfig( + azure=config_section_azure, + description="Sandbox Project", + dockerhub=config_section_dockerhub, + name="sandbox", + sre=config_section_sre, + ) + assert isinstance(config.azure, ConfigSectionAzure) + assert isinstance(config.name, str) + assert isinstance(config.sre, ConfigSectionSRE) + + def test_constructor_invalid( + self, + config_section_azure: ConfigSectionAzure, + config_section_dockerhub: ConfigSectionDockerHub, + ) -> 
None: + with pytest.raises( + ValidationError, + match=r"1 validation error for SREConfig\nsre\n Field required.*", + ): + SREConfig( + azure=config_section_azure, + description="Sandbox Project", + dockerhub=config_section_dockerhub, + name="sandbox", + ) + + @pytest.mark.parametrize( + "name", + [ + r"has spaces", + r"has!special@characters£", + r"has\tnon\rprinting\ncharacters", + r"", + ], + ) + def test_constructor_invalid_name( + self, + config_section_azure: ConfigSectionAzure, + config_section_dockerhub: ConfigSectionDockerHub, + config_section_sre: ConfigSectionSRE, + name: str, + ) -> None: + with pytest.raises( + ValidationError, + match=r"1 validation error for SREConfig\nname\n Value error, Expected valid string.*", + ): + SREConfig( + azure=config_section_azure, + description="Sandbox Project", + dockerhub=config_section_dockerhub, + name=name, + sre=config_section_sre, + ) + + def test_template(self) -> None: + config = SREConfig.template() + assert isinstance(config, SREConfig) + assert ( + config.azure.subscription_id + == "ID of the Azure subscription that the SRE will be deployed to" + ) + + def test_template_validation(self) -> None: + config = SREConfig.template() + with pytest.raises(DataSafeHavenTypeError): + SREConfig.from_yaml(config.to_yaml()) + + def test_from_yaml(self, sre_config, sre_config_yaml) -> None: + config = SREConfig.from_yaml(sre_config_yaml) + assert config == sre_config + assert isinstance(config.sre.software_packages, SoftwarePackageCategory) + + def test_from_remote( + self, mocker, context: Context, sre_config: SREConfig, sre_config_yaml: str + ) -> None: + mock_method = mocker.patch.object( + AzureSdk, "download_blob", return_value=sre_config_yaml + ) + config = SREConfig.from_remote(context) + + assert config == sre_config + mock_method.assert_called_once_with( + SREConfig.default_filename, + context.resource_group_name, + context.storage_account_name, + context.storage_container_name, + ) + + def test_to_yaml(self, sre_config: SREConfig, sre_config_yaml: str) -> None: + assert sre_config.to_yaml() == sre_config_yaml + + def test_upload(self, mocker, context, sre_config) -> None: + mock_method = mocker.patch.object(AzureSdk, "upload_blob", return_value=None) + sre_config.upload(context) + + mock_method.assert_called_once_with( + sre_config.to_yaml(), + SREConfig.default_filename, + context.resource_group_name, + context.storage_account_name, + context.storage_container_name, + ) + + +@pytest.mark.parametrize( + "value,expected", + [ + (r"Test SRE", "sre-testsre.yaml"), + (r"*a^b$c", "sre-abc.yaml"), + (r";'@-", "sre-.yaml"), + ], +) +def test_sre_config_name(value, expected): + assert sre_config_name(value) == expected diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000000..86fd6414e1 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,528 @@ +from pathlib import Path +from shutil import which +from subprocess import run + +import yaml +from azure.core.credentials import AccessToken, TokenCredential +from azure.mgmt.resource.subscriptions.models import Subscription +from pulumi.automation import ProjectSettings +from pytest import fixture + +import data_safe_haven.config.context_manager as context_mod +import data_safe_haven.logging.logger +from data_safe_haven import console +from data_safe_haven.config import ( + Context, + ContextManager, + DSHPulumiConfig, + DSHPulumiProject, + SHMConfig, + SREConfig, +) +from data_safe_haven.config.config_sections import ( + ConfigSectionAzure, + ConfigSectionDockerHub, + 
ConfigSectionSHM, + ConfigSectionSRE, + ConfigSubsectionRemoteDesktopOpts, +) +from data_safe_haven.exceptions import DataSafeHavenAzureError +from data_safe_haven.external import AzureSdk, PulumiAccount +from data_safe_haven.external.api.credentials import AzureSdkCredential +from data_safe_haven.infrastructure import SREProjectManager +from data_safe_haven.infrastructure.project_manager import ProjectManager +from data_safe_haven.logging import init_logging + + +def pytest_configure(config): + """Define constants for use across multiple tests""" + config.guid_admin = "00edec65-b071-4d26-8779-a9fe791c6e14" + config.guid_entra = "48b2425b-5f2c-4cbd-9458-0441daa8994c" + config.guid_subscription = "35ebced1-4e7a-4c1f-b634-c0886937085d" + config.guid_tenant = "d5c5c439-1115-4cb6-ab50-b8e547b6c8dd" + config.guid_user = "80b4ccfd-73ef-41b7-bb22-8ec268ec040b" + + +@fixture +def config_section_azure(request): + return ConfigSectionAzure( + location="uksouth", + subscription_id=request.config.guid_subscription, + tenant_id=request.config.guid_tenant, + ) + + +@fixture +def config_section_shm(config_section_shm_dict): + return ConfigSectionSHM(**config_section_shm_dict) + + +@fixture +def config_section_shm_dict(request): + return { + "admin_group_id": request.config.guid_admin, + "entra_tenant_id": request.config.guid_entra, + "fqdn": "shm.acme.com", + } + + +@fixture +def config_section_dockerhub() -> ConfigSectionDockerHub: + return ConfigSectionDockerHub( + access_token="dummytoken", + username="exampleuser", + ) + + +@fixture +def config_section_sre() -> ConfigSectionSRE: + return ConfigSectionSRE( + admin_email_address="admin@example.com", + admin_ip_addresses=["1.2.3.4"], + timezone="Europe/London", + ) + + +@fixture +def context(context_dict): + return Context(**context_dict) + + +@fixture +def context_dict(): + return { + "admin_group_name": "Acme Admins", + "description": "Acme Deployment", + "name": "acmedeployment", + "subscription_name": "Data Safe Haven Acme", + } + + +@fixture +def context_no_secrets(monkeypatch, context_dict) -> Context: + monkeypatch.setattr(Context, "pulumi_secrets_provider_url", None) + return Context(**context_dict) + + +@fixture +def context_manager(context_yaml) -> ContextManager: + return ContextManager.from_yaml(context_yaml) + + +@fixture +def context_tmpdir(context_dict, tmpdir, monkeypatch) -> tuple[Context, Path]: + monkeypatch.setattr(context_mod, "config_dir", lambda: Path(tmpdir)) + return (Context(**context_dict), tmpdir) + + +@fixture +def context_yaml(): + content = """--- + selected: acmedeployment + contexts: + acmedeployment: + admin_group_name: Acme Admins + description: Acme Deployment + name: acmedeployment + subscription_name: Data Safe Haven Acme + gems: + admin_group_name: Gems Admins + description: Gems + name: gems + subscription_name: Data Safe Haven Gems + """ + return yaml.dump(yaml.safe_load(content)) + + +@fixture +def local_project_settings(context_no_secrets, mocker): # noqa: ARG001 + """Overwrite adjust project settings to work locally, no secrets""" + mocker.patch.object( + ProjectManager, + "project_settings", + ProjectSettings( + name="data-safe-haven", + runtime="python", + ), + ) + + +@fixture(autouse=True, scope="session") +def local_pulumi_login(): + pulumi_path = which("pulumi") + run([pulumi_path, "login", "--local"], check=False) + yield + run([pulumi_path, "logout"], check=False) + + +@fixture(autouse=True, scope="session") +def log_directory(session_mocker, tmp_path_factory): + session_mocker.patch.object( + 
data_safe_haven.logging.logger, "logfile_name", return_value="test.log" + ) + log_dir = tmp_path_factory.mktemp("logs") + session_mocker.patch.object( + data_safe_haven.logging.logger, "log_dir", return_value=log_dir + ) + init_logging() + return log_dir + + +@fixture +def mock_azuresdk_blob_exists(mocker): + mocker.patch.object( + AzureSdk, + "blob_exists", + return_value=True, + ) + + +@fixture +def mock_azuresdk_get_subscription(mocker, request): + subscription = Subscription() + subscription.display_name = "Data Safe Haven Acme" + subscription.subscription_id = request.config.guid_subscription + subscription.tenant_id = request.config.guid_tenant + mocker.patch.object( + AzureSdk, + "get_subscription", + return_value=subscription, + ) + + +@fixture +def mock_azuresdk_get_credential(mocker): + class MockCredential(TokenCredential): + def get_token(*args, **kwargs): # noqa: ARG002 + return AccessToken("dummy-token", 0) + + mocker.patch.object( + AzureSdkCredential, + "get_credential", + return_value=MockCredential(), + ) + + +@fixture +def mock_azuresdk_get_credential_failure(mocker): + def fail_get_credential(): + print("mock get_credential") # noqa: T201 + msg = "mock get_credential error" + raise DataSafeHavenAzureError(msg) + + mocker.patch.object( + AzureSdkCredential, + "get_credential", + side_effect=fail_get_credential, + ) + + +@fixture +def mock_azuresdk_purge_keyvault(mocker): + mocker.patch.object( + AzureSdk, + "purge_keyvault", + return_value=True, + ) + + +@fixture +def mock_azuresdk_remove_blob(mocker): + mocker.patch.object( + AzureSdk, + "remove_blob", + return_value=None, + ) + + +@fixture +def mock_confirm_no(mocker): + return mocker.patch.object( + console, + "confirm", + return_value=False, + ) + + +@fixture +def mock_confirm_yes(mocker): + return mocker.patch.object( + console, + "confirm", + return_value=True, + ) + + +@fixture +def mock_install_plugins(mocker): + mocker.patch.object(ProjectManager, "install_plugins", return_value=None) + + +@fixture +def mock_key_vault_key(monkeypatch): + class MockKeyVaultKey: + def __init__(self, key_name, key_vault_name): + self.key_name = key_name + self.key_vault_name = key_vault_name + self.id = "mock_key/version" + + def mock_get_keyvault_key(self, key_name, key_vault_name): # noqa: ARG001 + return MockKeyVaultKey(key_name, key_vault_name) + + monkeypatch.setattr(AzureSdk, "get_keyvault_key", mock_get_keyvault_key) + + +@fixture +def mock_storage_exists(mocker): + return mocker.patch.object( + AzureSdk, + "storage_exists", + return_value=True, + ) + + +@fixture +def offline_pulumi_account(monkeypatch): + """Overwrite PulumiAccount so that it runs locally""" + monkeypatch.setattr( + PulumiAccount, "env", {"PULUMI_CONFIG_PASSPHRASE": "passphrase"} + ) + + +@fixture +def pulumi_config( + pulumi_project: DSHPulumiProject, pulumi_project_other: DSHPulumiProject +) -> DSHPulumiConfig: + return DSHPulumiConfig( + encrypted_key="CALbHybtRdxKjSnr9UYY", + projects={ + "acmedeployment": pulumi_project, + "other_project": pulumi_project_other, + }, + ) + + +@fixture +def pulumi_config_empty() -> DSHPulumiConfig: + return DSHPulumiConfig( + encrypted_key=None, + projects={}, + ) + + +@fixture +def pulumi_config_no_key( + pulumi_project: DSHPulumiProject, + pulumi_project_other: DSHPulumiProject, + pulumi_project_sandbox: DSHPulumiProject, +) -> DSHPulumiConfig: + return DSHPulumiConfig( + encrypted_key=None, + projects={ + "acmedeployment": pulumi_project, + "other_project": pulumi_project_other, + "sandbox": pulumi_project_sandbox, + 
}, + ) + + +@fixture +def pulumi_project(pulumi_project_stack_config) -> DSHPulumiProject: + return DSHPulumiProject( + stack_config=pulumi_project_stack_config, + ) + + +@fixture +def pulumi_project_other() -> DSHPulumiProject: + return DSHPulumiProject( + stack_config={ + "azure-native:location": "uksouth", + "azure-native:subscriptionId": "def", + "data-safe-haven:variable": "-3", + }, + ) + + +@fixture +def pulumi_project_sandbox() -> DSHPulumiProject: + return DSHPulumiProject( + stack_config={ + "azure-native:location": "uksouth", + "azure-native:subscriptionId": "ghi", + "data-safe-haven:variable": "8", + }, + ) + + +@fixture +def pulumi_project_stack_config(): + return { + "azure-native:location": "uksouth", + "azure-native:subscriptionId": "abc", + "data-safe-haven:variable": "5", + } + + +@fixture +def pulumi_config_yaml() -> str: + content = """--- + encrypted_key: CALbHybtRdxKjSnr9UYY + projects: + acmedeployment: + stack_config: + azure-native:location: uksouth + azure-native:subscriptionId: abc + data-safe-haven:variable: 5 + other_project: + stack_config: + azure-native:location: uksouth + azure-native:subscriptionId: def + data-safe-haven:variable: -3 + """ + return yaml.dump(yaml.safe_load(content)) + + +@fixture +def remote_desktop_config() -> ConfigSubsectionRemoteDesktopOpts: + return ConfigSubsectionRemoteDesktopOpts() + + +@fixture +def shm_config( + config_section_azure: ConfigSectionAzure, config_section_shm: ConfigSectionSHM +) -> SHMConfig: + return SHMConfig( + azure=config_section_azure, + shm=config_section_shm, + ) + + +@fixture +def shm_config_alternate( + config_section_azure: ConfigSectionAzure, config_section_shm: ConfigSectionSHM +) -> SHMConfig: + config_section_shm.fqdn = "shm-alternate.acme.com" + return SHMConfig( + azure=config_section_azure, + shm=config_section_shm, + ) + + +@fixture +def shm_config_file(shm_config_yaml: str, tmp_path: Path) -> Path: + config_file_path = tmp_path / "shm.yaml" + with open(config_file_path, "w") as f: + f.write(shm_config_yaml) + return config_file_path + + +@fixture +def shm_config_yaml(request): + content = ( + """--- + azure: + location: uksouth + subscription_id: guid_subscription + tenant_id: guid_tenant + shm: + admin_group_id: guid_admin + entra_tenant_id: guid_entra + fqdn: shm.acme.com + """.replace( + "guid_admin", request.config.guid_admin + ) + .replace("guid_entra", request.config.guid_entra) + .replace("guid_subscription", request.config.guid_subscription) + .replace("guid_tenant", request.config.guid_tenant) + ) + return yaml.dump(yaml.safe_load(content)) + + +@fixture +def sre_config_file(sre_config_yaml, tmp_path): + config_file_path = tmp_path / "config.yaml" + with open(config_file_path, "w") as f: + f.write(sre_config_yaml) + return config_file_path + + +@fixture +def sre_config( + config_section_azure: ConfigSectionAzure, + config_section_dockerhub: ConfigSectionDockerHub, + config_section_sre: ConfigSectionSRE, +) -> SREConfig: + return SREConfig( + azure=config_section_azure, + description="Sandbox Project", + dockerhub=config_section_dockerhub, + name="sandbox", + sre=config_section_sre, + ) + + +@fixture +def sre_config_alternate( + config_section_azure: ConfigSectionAzure, + config_section_dockerhub: ConfigSectionDockerHub, + config_section_sre: ConfigSectionSRE, +) -> SREConfig: + config_section_sre.admin_ip_addresses = ["2.3.4.5"] + return SREConfig( + azure=config_section_azure, + description="Alternative Project", + dockerhub=config_section_dockerhub, + name="alternative", + 
sre=config_section_sre, + ) + + +@fixture +def sre_config_yaml(request): + content = """--- + azure: + location: uksouth + subscription_id: guid_subscription + tenant_id: guid_tenant + description: Sandbox Project + dockerhub: + access_token: dummytoken + username: exampleuser + name: sandbox + sre: + admin_email_address: admin@example.com + admin_ip_addresses: + - 1.2.3.4/32 + data_provider_ip_addresses: [] + databases: [] + remote_desktop: + allow_copy: false + allow_paste: false + research_user_ip_addresses: [] + software_packages: none + timezone: Europe/London + workspace_skus: [] + """.replace( + "guid_subscription", request.config.guid_subscription + ).replace( + "guid_tenant", request.config.guid_tenant + ) + return yaml.dump(yaml.safe_load(content)) + + +@fixture +def sre_project_manager( + context_no_secrets, + sre_config, + pulumi_config_no_key, + local_project_settings, # noqa: ARG001 + mock_azuresdk_get_subscription, # noqa: ARG001 + mock_azuresdk_get_credential, # noqa: ARG001 + offline_pulumi_account, # noqa: ARG001 +): + return SREProjectManager( + context=context_no_secrets, + config=sre_config, + pulumi_config=pulumi_config_no_key, + ) diff --git a/tests/console/test_format.py b/tests/console/test_format.py new file mode 100644 index 0000000000..02627dc3dd --- /dev/null +++ b/tests/console/test_format.py @@ -0,0 +1,44 @@ +from data_safe_haven.console.format import tabulate + + +class TestTabulate: + def test_tabulate(self, capsys): + header = ["head1", "head2"] + rows = [["11", "12"], ["21", "22"]] + tabulate(header=header, rows=rows) + captured = capsys.readouterr() + for line in [ + "┏━━━━━━━┳━━━━━━━┓", + "┃ head1 ┃ head2 ┃", + "┡━━━━━━━╇━━━━━━━┩", + "│ 11 │ 12 │", + "│ 21 │ 22 │", + "└───────┴───────┘", + ]: + assert line in captured.out + + def test_tabulate_no_header(self, capsys): + rows = [["11", "12"], ["21", "22"]] + tabulate(rows=rows) + captured = capsys.readouterr() + for line in [ + "┏━━━━┳━━━━┓", + "┃ ┃ ┃", + "┡━━━━╇━━━━┩", + "│ 11 │ 12 │", + "│ 21 │ 22 │", + "└────┴────┘", + ]: + assert line in captured.out + + def test_tabulate_no_rows(self, capsys): + header = ["head1", "head2"] + tabulate(header=header) + captured = capsys.readouterr() + for line in [ + "┏━━━━━━━┳━━━━━━━┓", + "┃ head1 ┃ head2 ┃", + "┡━━━━━━━╇━━━━━━━┩", + "└───────┴───────┘", + ]: + assert line in captured.out diff --git a/tests/console/test_pretty.py b/tests/console/test_pretty.py new file mode 100644 index 0000000000..dd487ea7c4 --- /dev/null +++ b/tests/console/test_pretty.py @@ -0,0 +1,29 @@ +import pytest + +from data_safe_haven.console.pretty import pretty_print + + +class TestPrettyPrint: + @pytest.mark.parametrize( + "objects,sep,expected,not_expected", + [ + (["hello"], None, "hello", None), + (["[green]hello[/]"], None, "hello", "[green]"), + (["[bold red]hello[/]"], None, "hello", "[bold red]"), + (["hello", "world"], None, "hello world", None), + (["hello", "world"], "\n", "hello\nworld", None), + ([(1, 2, 3)], "\n", "(1, 2, 3)", None), + (["[link=https://example.com]abc[/]"], None, "abc", "example"), + ], + ) + def test_pretty_print(self, objects, sep, expected, not_expected, capsys): + if sep is not None: + pretty_print(*objects, sep=sep) + else: + pretty_print(*objects) + + captured = capsys.readouterr() + assert expected in captured.out + + if not_expected is not None: + assert not_expected not in captured.out diff --git a/tests/console/test_prompts.py b/tests/console/test_prompts.py new file mode 100644 index 0000000000..5b11fbb41b --- /dev/null +++ 
b/tests/console/test_prompts.py @@ -0,0 +1,36 @@ +import io +import sys + +import pytest + +from data_safe_haven.console.prompts import confirm + + +class TestConfirm: + @pytest.mark.parametrize( + "default_to_yes,default_to_yes_string,response,expected_result", + [ + (True, "y", "y", True), + (True, "y", "n", False), + (True, "y", "\n", True), + (False, "n", "y", True), + (False, "n", "n", False), + (False, "n", "\n", False), + ], + ) + def test_confirm( + self, + default_to_yes, + default_to_yes_string, + response, + expected_result, + capsys, + mocker, + ): + mocker.patch.object(sys, "stdin", io.StringIO(response)) + + result = confirm("yes or no?", default_to_yes=default_to_yes) + assert result is expected_result + + captured = capsys.readouterr() + assert f"yes or no? [y/n] ({default_to_yes_string}):" in captured.out diff --git a/tests/external/api/conftest.py b/tests/external/api/conftest.py new file mode 100644 index 0000000000..7f5a59bb63 --- /dev/null +++ b/tests/external/api/conftest.py @@ -0,0 +1,152 @@ +import datetime +import os + +import jwt +import pytest +from azure.core.credentials import AccessToken +from azure.identity import ( + AuthenticationRecord, + AzureCliCredential, + DeviceCodeCredential, +) + +from data_safe_haven.external import GraphApi +from data_safe_haven.external.api.credentials import GraphApiCredential + + +def pytest_configure(config): + """Define constants for use across multiple tests""" + config.user_upn = "username@example.com" + + +@pytest.fixture +def authentication_record(request): + return AuthenticationRecord( + tenant_id=request.config.guid_tenant, + client_id="14d82eec-204b-4c2f-b7e8-296a70dab67e", + authority="login.microsoftonline.com", + home_account_id=request.config.guid_user, + username=request.config.user_upn, + ) + + +@pytest.fixture +def azure_cli_token(request): + return jwt.encode( + { + "name": "username", + "oid": request.config.guid_user, + "upn": request.config.user_upn, + "tid": request.config.guid_tenant, + }, + "key", + ) + + +@pytest.fixture +def graph_api_token(request): + return jwt.encode( + { + "scp": "GroupMember.Read.All User.Read.All", + "tid": request.config.guid_tenant, + }, + "key", + ) + + +@pytest.fixture +def mock_authenticationrecord_deserialize(mocker, authentication_record): + return mocker.patch.object( + AuthenticationRecord, + "deserialize", + return_value=authentication_record, + ) + + +@pytest.fixture +def mock_azureclicredential_get_token(mocker, azure_cli_token): + return mocker.patch.object( + AzureCliCredential, + "get_token", + return_value=AccessToken(azure_cli_token, 0), + ) + + +@pytest.fixture +def mock_azureclicredential_get_token_invalid(mocker): + return mocker.patch.object( + AzureCliCredential, + "get_token", + return_value=AccessToken("not a jwt", 0), + ) + + +@pytest.fixture +def mock_devicecodecredential_authenticate(mocker, authentication_record): + return mocker.patch.object( + DeviceCodeCredential, + "authenticate", + return_value=authentication_record, + ) + + +@pytest.fixture +def mock_devicecodecredential_get_token(mocker, graph_api_token): + return mocker.patch.object( + DeviceCodeCredential, + "get_token", + return_value=AccessToken(graph_api_token, 0), + ) + + +@pytest.fixture +def mock_devicecodecredential_new(mocker, authentication_record): + class MockDeviceCodeCredential: + def __init__(self, *args, prompt_callback, **kwargs): # noqa: ARG002 + self.prompt_callback = prompt_callback + + def authenticate(self, *args, **kwargs): # noqa: ARG002 + self.prompt_callback( + 
"VERIFICATION_URI", + "USER_DEVICE_CODE", + datetime.datetime.now(tz=datetime.UTC), + ) + return authentication_record + + mocker.patch.object( + DeviceCodeCredential, + "__new__", + lambda *args, **kwargs: MockDeviceCodeCredential(*args, **kwargs), + ) + + +@pytest.fixture +def mock_graphapi_read_domains(mocker): + mocker.patch.object( + GraphApi, + "read_domains", + return_value=[{"id": "example.com"}], + ) + + +@pytest.fixture +def mock_graphapicredential_get_credential(mocker): + mocker.patch.object( + GraphApiCredential, + "get_credential", + return_value=DeviceCodeCredential(), + ) + + +@pytest.fixture +def mock_graphapicredential_get_token(mocker, graph_api_token): + mocker.patch.object( + GraphApiCredential, + "get_token", + return_value=AccessToken(graph_api_token, 0), + ) + + +@pytest.fixture +def tmp_config_dir(mocker, tmp_path): + mocker.patch.dict(os.environ, {"DSH_CONFIG_DIRECTORY": str(tmp_path)}) diff --git a/tests/external/api/test_azure_sdk.py b/tests/external/api/test_azure_sdk.py new file mode 100644 index 0000000000..4c9e1ae031 --- /dev/null +++ b/tests/external/api/test_azure_sdk.py @@ -0,0 +1,328 @@ +import pytest +from azure.core.exceptions import ClientAuthenticationError, ResourceNotFoundError +from azure.mgmt.keyvault.v2023_07_01.models import DeletedVault +from azure.mgmt.resource.subscriptions import SubscriptionClient +from azure.mgmt.resource.subscriptions.models import Subscription +from azure.mgmt.storage.v2021_08_01.models import ( + StorageAccountListKeysResult, +) +from pytest import fixture + +import data_safe_haven.external.api.azure_sdk +from data_safe_haven.exceptions import ( + DataSafeHavenAzureAPIAuthenticationError, + DataSafeHavenAzureError, + DataSafeHavenAzureStorageError, + DataSafeHavenValueError, +) +from data_safe_haven.external import AzureSdk, GraphApi + + +@fixture +def mock_blob_client(monkeypatch): + class MockBlobClient: + def __init__( + self, + resource_group_name, # noqa: ARG002 + storage_account_name, # noqa: ARG002 + storage_container_name, # noqa: ARG002 + blob_name, + ): + self.blob_name = blob_name + + def exists(self): + if self.blob_name == "exists": + return True + else: + return False + + def mock_blob_client( + self, # noqa: ARG001 + resource_group_name, + storage_account_name, + storage_container_name, + blob_name, + ): + return MockBlobClient( + resource_group_name, + storage_account_name, + storage_container_name, + blob_name, + ) + + monkeypatch.setattr( + data_safe_haven.external.api.azure_sdk.AzureSdk, "blob_client", mock_blob_client + ) + + +@fixture +def mock_key_client(monkeypatch): + class MockKeyClient: + def __init__(self, vault_url, credential): + self.vault_url = vault_url + self.credential = credential + + def get_key(self, key_name): + if key_name == "exists": + return f"key: {key_name}" + else: + raise ResourceNotFoundError + + monkeypatch.setattr( + data_safe_haven.external.api.azure_sdk, "KeyClient", MockKeyClient + ) + + +@fixture +def mock_key_vault_management_client(monkeypatch): + class Poller: + def done(self): + return True + + class MockVaultsOperations: + def __init__(self, vault_name, location): + self._vault_name = vault_name + self._location = location + + def get_deleted(self, vault_name, location): + if self._vault_name == vault_name and self._location == location: + print( # noqa: T201 + f"Found deleted key vault {vault_name} in {location}" + ) + return DeletedVault() + print("Found no deleted key vaults") # noqa: T201 + return None + + def begin_purge_deleted(self, vault_name, location): 
+ if self._vault_name == vault_name and self._location == location: + print( # noqa: T201 + f"Purging deleted key vault {vault_name} in {location}" + ) + self._vault_name = None + return Poller() + + class MockKeyVaultManagementClient: + def __init__(self, *args, **kwargs): # noqa: ARG002 + self.vaults = MockVaultsOperations("key_vault_name", "location") + + monkeypatch.setattr( + data_safe_haven.external.api.azure_sdk, + "KeyVaultManagementClient", + MockKeyVaultManagementClient, + ) + + +@fixture +def mock_storage_management_client(monkeypatch): + class MockStorageAccount: + def __init__(self, name): + self.name = name + + class MockStorageAccountsOperations: + def list(self): + return [ + MockStorageAccount("shmstorageaccount"), + MockStorageAccount("shmstorageaccounter"), + MockStorageAccount("shmstorageaccountest"), + ] + + def list_keys( + self, resource_group_name, account_name, **kwargs # noqa: ARG002 + ): + if account_name == "shmstorageaccount": + return StorageAccountListKeysResult() + else: + return None + + class MockStorageManagementClient: + def __init__(self, *args, **kwargs): # noqa: ARG002 + self.storage_accounts = MockStorageAccountsOperations() + + monkeypatch.setattr( + data_safe_haven.external.api.azure_sdk, + "StorageManagementClient", + MockStorageManagementClient, + ) + + +@fixture +def mock_subscription_client(monkeypatch, request): + class MockSubscriptionsOperations: + def __init__(self, *args, **kwargs): + pass + + def list(self): + subscription_1 = Subscription() + subscription_1.display_name = "Subscription 1" + subscription_1.id = request.config.guid_subscription + subscription_2 = Subscription() + subscription_2.display_name = "Subscription 2" + return [subscription_1, subscription_2] + + class MockSubscriptionClient: + def __init__(self, *args, **kwargs): + pass + + @property + def subscriptions(self): + return MockSubscriptionsOperations() + + monkeypatch.setattr( + data_safe_haven.external.api.azure_sdk, + "SubscriptionClient", + MockSubscriptionClient, + ) + + +class TestAzureSdk: + def test_entra_directory(self): + sdk = AzureSdk("subscription name") + assert isinstance(sdk.entra_directory, GraphApi) + + def test_subscription_id( + self, + request, + mock_azuresdk_get_subscription, # noqa: ARG002 + ): + sdk = AzureSdk("subscription name") + assert sdk.subscription_id == request.config.guid_subscription + + def test_tenant_id( + self, + request, + mock_azuresdk_get_subscription, # noqa: ARG002 + ): + sdk = AzureSdk("subscription name") + assert sdk.tenant_id == request.config.guid_tenant + + def test_blob_exists(self, mock_blob_client, mock_storage_exists): # noqa: ARG002 + sdk = AzureSdk("subscription name") + exists = sdk.blob_exists( + "exists", "resource_group", "storage_account", "storage_container" + ) + assert isinstance(exists, bool) + assert exists + + mock_storage_exists.assert_called_once_with( + "storage_account", + ) + + def test_blob_exists_no_storage( + self, + mocker, + mock_blob_client, # noqa: ARG002 + ): + sdk = AzureSdk("subscription name") + mocker.patch.object(sdk, "storage_exists", return_value=False) + with pytest.raises( + DataSafeHavenAzureStorageError, + match="Storage account 'storage_account' does not exist", + ): + sdk.blob_exists( + "exists", "resource_group", "storage_account", "storage_container" + ) + + def test_blob_does_not_exist( + self, mock_blob_client, mock_storage_exists # noqa: ARG002 + ): + sdk = AzureSdk("subscription name") + exists = sdk.blob_exists( + "abc.txt", "resource_group", "storage_account", 
"storage_container" + ) + assert isinstance(exists, bool) + assert not exists + + mock_storage_exists.assert_called_once_with( + "storage_account", + ) + + def test_get_keyvault_key(self, mock_key_client): # noqa: ARG002 + sdk = AzureSdk("subscription name") + key = sdk.get_keyvault_key("exists", "key vault name") + assert key == "key: exists" + + def test_get_keyvault_key_missing(self, mock_key_client): # noqa: ARG002 + sdk = AzureSdk("subscription name") + with pytest.raises( + DataSafeHavenAzureError, match="Failed to retrieve key does not exist" + ): + sdk.get_keyvault_key("does not exist", "key vault name") + + @pytest.mark.parametrize( + "storage_account_name", + [("shmstorageaccount"), ("shmstoragenonexistent")], + ) + def test_get_storage_account_keys( + self, + storage_account_name, + mock_storage_management_client, # noqa: ARG002 + mock_azuresdk_get_subscription, # noqa: ARG002 + ): + sdk = AzureSdk("subscription name") + if storage_account_name == "shmstorageaccount": + error_text = "No keys were retrieved" + else: + error_text = "Could not connect to storage account" + + with pytest.raises(DataSafeHavenAzureStorageError, match=error_text): + sdk.get_storage_account_keys("resource group", storage_account_name) + + def test_get_subscription(self, request, mock_subscription_client): # noqa: ARG002 + sdk = AzureSdk("subscription name") + subscription = sdk.get_subscription("Subscription 1") + assert isinstance(subscription, Subscription) + assert subscription.display_name == "Subscription 1" + assert subscription.id == request.config.guid_subscription + + def test_get_subscription_does_not_exist( + self, mock_subscription_client # noqa: ARG002 + ): + sdk = AzureSdk("subscription name") + with pytest.raises( + DataSafeHavenValueError, + match="Could not find subscription 'Subscription 3'", + ): + sdk.get_subscription("Subscription 3") + + def test_get_subscription_authentication_error(self, mocker): + def raise_client_authentication_error(*args): # noqa: ARG001 + raise ClientAuthenticationError + + mocker.patch.object( + SubscriptionClient, "__new__", side_effect=raise_client_authentication_error + ) + sdk = AzureSdk("subscription name") + with pytest.raises( + DataSafeHavenAzureAPIAuthenticationError, + match="Failed to authenticate with Azure API.", + ): + sdk.get_subscription("Subscription 1") + + def test_purge_keyvault( + self, + mock_azuresdk_get_subscription, # noqa: ARG002 + mock_azuresdk_get_credential, # noqa: ARG002 + mock_key_vault_management_client, # noqa: ARG002 + capsys, + ): + sdk = AzureSdk("subscription name") + sdk.purge_keyvault("key_vault_name", "location") + stdout, _ = capsys.readouterr() + assert "Found deleted key vault key_vault_name in location" in stdout + assert "Purging deleted key vault key_vault_name in location" in stdout + assert "Purged Key Vault key_vault_name" in stdout + + @pytest.mark.parametrize( + "storage_account_name,exists", + [("shmstorageaccount", True), ("shmstoragenonexistent", False)], + ) + def test_storage_exists( + self, + storage_account_name, + exists, + mock_storage_management_client, # noqa: ARG002 + mock_azuresdk_get_subscription, # noqa: ARG002 + ): + sdk = AzureSdk("subscription name") + + assert sdk.storage_exists(storage_account_name) == exists diff --git a/tests/external/api/test_credentials.py b/tests/external/api/test_credentials.py new file mode 100644 index 0000000000..c0e631e912 --- /dev/null +++ b/tests/external/api/test_credentials.py @@ -0,0 +1,173 @@ +import pytest +from azure.identity import ( + 
AzureCliCredential, + DeviceCodeCredential, +) + +from data_safe_haven.directories import config_dir +from data_safe_haven.exceptions import DataSafeHavenAzureError +from data_safe_haven.external.api.credentials import ( + AzureSdkCredential, + DeferredCredential, + GraphApiCredential, +) + + +class TestDeferredCredential: + def test_confirm_credentials_interactive( + self, + mock_confirm_yes, # noqa: ARG002 + mock_azureclicredential_get_token, # noqa: ARG002 + capsys, + request, + ): + DeferredCredential.cache_ = set() + credential = AzureSdkCredential(skip_confirmation=False) + credential.get_credential() + out, _ = capsys.readouterr() + assert "You are logged into the Azure CLI as" in out + assert f"user: username ({request.config.guid_user})" in out + assert f"tenant: example.com ({request.config.guid_tenant})" in out + + def test_confirm_credentials_interactive_fail( + self, + mock_confirm_no, # noqa: ARG002 + mock_azureclicredential_get_token, # noqa: ARG002 + ): + DeferredCredential.cache_ = set() + credential = AzureSdkCredential(skip_confirmation=False) + with pytest.raises( + DataSafeHavenAzureError, + match="Error getting account information from Azure CLI.", + ): + credential.get_credential() + + def test_confirm_credentials_interactive_cache( + self, + mock_confirm_yes, # noqa: ARG002 + mock_azureclicredential_get_token, # noqa: ARG002 + capsys, + request, + ): + DeferredCredential.cache_ = { + (request.config.guid_user, request.config.guid_tenant) + } + credential = AzureSdkCredential(skip_confirmation=False) + credential.get_credential() + out, _ = capsys.readouterr() + assert "You are logged into the Azure CLI as" not in out + + def test_decode_token_error( + self, mock_azureclicredential_get_token_invalid # noqa: ARG002 + ): + credential = AzureSdkCredential(skip_confirmation=True) + with pytest.raises( + DataSafeHavenAzureError, + match="Error getting account information from Azure CLI.", + ): + credential.decode_token(credential.token) + + +class TestAzureSdkCredential: + def test_get_credential(self, mock_azureclicredential_get_token): # noqa: ARG002 + credential = AzureSdkCredential(skip_confirmation=True) + assert isinstance(credential.get_credential(), AzureCliCredential) + + def test_get_token(self, mock_azureclicredential_get_token): # noqa: ARG002 + credential = AzureSdkCredential(skip_confirmation=True) + assert isinstance(credential.token, str) + + def test_decode_token( + self, + request, + mock_azureclicredential_get_token, # noqa: ARG002 + ): + credential = AzureSdkCredential(skip_confirmation=True) + decoded = credential.decode_token(credential.token) + assert decoded["name"] == "username" + assert decoded["oid"] == request.config.guid_user + assert decoded["upn"] == "username@example.com" + assert decoded["tid"] == request.config.guid_tenant + + +class TestGraphApiCredential: + def test_authentication_record_is_used( + self, + request, + authentication_record, + mock_authenticationrecord_deserialize, + mock_devicecodecredential_authenticate, # noqa: ARG002 + tmp_config_dir, # noqa: ARG002 + ): + # Write an authentication record + cache_name = f"dsh-{request.config.guid_tenant}" + authentication_record_path = ( + config_dir() / f".msal-authentication-cache-{cache_name}" + ) + serialised_record = authentication_record.serialize() + with open(authentication_record_path, "w") as f_auth: + f_auth.write(serialised_record) + + # Get a credential + credential = GraphApiCredential( + request.config.guid_tenant, skip_confirmation=True + ) + 
credential.get_credential() + + # Remove the authentication record + authentication_record_path.unlink(missing_ok=True) + + mock_authenticationrecord_deserialize.assert_called_once_with(serialised_record) + + def test_decode_token( + self, + request, + mock_graphapicredential_get_token, # noqa: ARG002 + ): + credential = GraphApiCredential( + request.config.guid_tenant, skip_confirmation=True + ) + decoded = credential.decode_token(credential.token) + assert decoded["scp"] == "GroupMember.Read.All User.Read.All" + assert decoded["tid"] == request.config.guid_tenant + + def test_get_credential( + self, + request, + mock_devicecodecredential_authenticate, # noqa: ARG002 + mock_devicecodecredential_get_token, # noqa: ARG002 + tmp_config_dir, # noqa: ARG002 + ): + credential = GraphApiCredential( + request.config.guid_tenant, skip_confirmation=True + ) + assert isinstance(credential.get_credential(), DeviceCodeCredential) + + def test_get_credential_callback( + self, + capsys, + request, + mock_devicecodecredential_new, # noqa: ARG002 + tmp_config_dir, # noqa: ARG002 + ): + credential = GraphApiCredential( + request.config.guid_tenant, skip_confirmation=True + ) + credential.get_credential() + captured = capsys.readouterr() + cleaned_stdout = " ".join(captured.out.split()) + assert ( + "Go to VERIFICATION_URI in a web browser and enter the code USER_DEVICE_CODE at the prompt." + in cleaned_stdout + ) + + def test_get_token( + self, + request, + mock_devicecodecredential_get_token, # noqa: ARG002 + mock_graphapicredential_get_credential, # noqa: ARG002 + ): + credential = GraphApiCredential( + request.config.guid_tenant, skip_confirmation=True + ) + assert isinstance(credential.token, str) diff --git a/tests/external/api/test_graph_api.py b/tests/external/api/test_graph_api.py new file mode 100644 index 0000000000..f9095db3de --- /dev/null +++ b/tests/external/api/test_graph_api.py @@ -0,0 +1,78 @@ +import pytest +import requests + +from data_safe_haven.exceptions import ( + DataSafeHavenMicrosoftGraphError, + DataSafeHavenValueError, +) +from data_safe_haven.external import GraphApi + + +class TestGraphApi: + def test_from_scopes(self, request): + api = GraphApi.from_scopes( + scopes=["scope1", "scope2"], tenant_id=request.config.guid_tenant + ) + assert api.credential.tenant_id == request.config.guid_tenant + assert "scope1" in api.credential.scopes + assert "scope2" in api.credential.scopes + + def test_from_token(self, request, graph_api_token): + api = GraphApi.from_token(graph_api_token) + assert api.credential.tenant_id == request.config.guid_tenant + assert "GroupMember.Read.All" in api.credential.scopes + assert "User.Read.All" in api.credential.scopes + + def test_from_token_invalid(self): + with pytest.raises( + DataSafeHavenValueError, + match="Could not construct GraphApi from provided token.", + ): + GraphApi.from_token("not a jwt") + + def test_add_custom_domain( + self, + request, + requests_mock, + mock_graphapicredential_get_token, # noqa: ARG002 + ): + domain_name = "example.com" + requests_mock.get( + "https://graph.microsoft.com/v1.0/domains", + json={"value": [{"id": domain_name}, {"id": "example.org"}]}, + ) + requests_mock.get( + f"https://graph.microsoft.com/v1.0/domains/{domain_name}/verificationDnsRecords", + json={ + "value": [ + {"recordType": "Caa", "text": "caa-record-text"}, + {"recordType": "Txt", "text": "txt-record-text"}, + ] + }, + ) + api = GraphApi.from_scopes(scopes=[], tenant_id=request.config.guid_tenant) + result = api.add_custom_domain(domain_name) + 
assert result == "txt-record-text" + + def test_http_get_failure( + self, + request, + requests_mock, + mock_graphapicredential_get_token, # noqa: ARG002 + ): + url = "https://example.com" + requests_mock.get(url, exc=requests.exceptions.ConnectTimeout) + api = GraphApi.from_scopes(scopes=[], tenant_id=request.config.guid_tenant) + with pytest.raises( + DataSafeHavenMicrosoftGraphError, + match="Could not execute GET request to 'https://example.com'.", + ): + api.http_get(url) + + def test_token( + self, + graph_api_token, + mock_graphapicredential_get_token, # noqa: ARG002 + ): + api = GraphApi.from_token(graph_api_token) + assert api.token == graph_api_token diff --git a/tests/functions/test_network.py b/tests/functions/test_network.py new file mode 100644 index 0000000000..3646f1b2af --- /dev/null +++ b/tests/functions/test_network.py @@ -0,0 +1,46 @@ +import pytest +import requests + +from data_safe_haven.exceptions import DataSafeHavenValueError +from data_safe_haven.functions import current_ip_address, ip_address_in_list + + +class TestCurrentIpAddress: + def test_output(self, requests_mock): + requests_mock.get("https://api.ipify.org", text="1.2.3.4") + ip_address = current_ip_address() + assert ip_address == "1.2.3.4" + + def test_request_not_resolved(self, requests_mock): + requests_mock.get( + "https://api.ipify.org", exc=requests.exceptions.ConnectTimeout + ) + with pytest.raises(DataSafeHavenValueError) as exc_info: + current_ip_address() + assert exc_info.match(r"Could not determine IP address.") + + +class TestIpAddressInList: + def test_is_in_list(self, requests_mock): + requests_mock.get("https://api.ipify.org", text="1.2.3.4") + assert ip_address_in_list(["1.2.3.4", "2.3.4.5"]) + + def test_is_not_in_list(self, requests_mock): + requests_mock.get("https://api.ipify.org", text="1.2.3.4") + assert not ip_address_in_list(["2.3.4.5"]) + + def test_is_in_cidr_list(self, requests_mock): + requests_mock.get("https://api.ipify.org", text="1.2.3.4") + assert ip_address_in_list(["1.2.3.4/32", "2.3.4.5/32"]) + + def test_is_in_non_trivial_cidr_list(self, requests_mock): + requests_mock.get("https://api.ipify.org", text="1.2.3.4") + assert ip_address_in_list(["1.2.3.0/29", "2.3.4.0/29"]) + + def test_not_resolved(self, requests_mock): + requests_mock.get( + "https://api.ipify.org", exc=requests.exceptions.ConnectTimeout + ) + with pytest.raises(DataSafeHavenValueError) as exc_info: + ip_address_in_list(["2.3.4.5"]) + assert exc_info.match(r"Could not determine IP address.") diff --git a/tests/functions/test_strings.py b/tests/functions/test_strings.py new file mode 100644 index 0000000000..43b6c8702a --- /dev/null +++ b/tests/functions/test_strings.py @@ -0,0 +1,76 @@ +import pytest +from freezegun import freeze_time + +from data_safe_haven.exceptions import DataSafeHavenValueError +from data_safe_haven.functions import get_key_vault_name, json_safe, next_occurrence + + +class TestNextOccurrence: + @pytest.mark.parametrize( + "hour,minute,timezone,expected", + [ + (5, 13, "Australia/Perth", "2024-01-02T21:13:00+00:00"), + (0, 13, "Australia/Perth", "2024-01-02T16:13:00+00:00"), + (20, 13, "Australia/Perth", "2024-01-02T12:13:00+00:00"), + (20, 13, "Europe/London", "2024-01-02T20:13:00+00:00"), + ], + ) + @freeze_time("1am on Jan 2nd, 2024") + def test_next_occurrence(self, hour, minute, timezone, expected): + next_time = next_occurrence(hour, minute, timezone) + assert next_time == expected + + @freeze_time("1am on July 2nd, 2024") + def test_dst(self): + next_time = 
next_occurrence(13, 5, "Europe/London") + assert next_time == "2024-07-02T12:05:00+00:00" + + @freeze_time("1am on Jan 2nd, 2024") + def test_timeformat(self): + next_time = next_occurrence(5, 13, "Australia/Perth", time_format="iso_minute") + assert next_time == "2024-01-02 21:13" + + @freeze_time("9pm on Jan 2nd, 2024") + def test_is_tomorrow(self): + next_time = next_occurrence(5, 13, "Australia/Perth") + assert next_time == "2024-01-03T21:13:00+00:00" + + def test_invalid_hour(self): + with pytest.raises(DataSafeHavenValueError) as exc_info: + next_occurrence(99, 13, "Europe/London") + assert exc_info.match(r"Time '99:13' was not recognised.") + + def test_invalid_minute(self): + with pytest.raises(DataSafeHavenValueError) as exc_info: + next_occurrence(5, 99, "Europe/London") + assert exc_info.match(r"Time '5:99' was not recognised.") + + def test_invalid_timezone(self): + with pytest.raises(DataSafeHavenValueError) as exc_info: + next_occurrence(5, 13, "Mars/OlympusMons") + assert exc_info.match(r"Timezone 'Mars/OlympusMons' was not recognised.") + + def test_invalid_timeformat(self): + with pytest.raises(DataSafeHavenValueError) as exc_info: + next_occurrence(5, 13, "Australia/Perth", time_format="invalid") + assert exc_info.match(r"Time format 'invalid' was not recognised.") + + +@pytest.mark.parametrize( + "value,expected", + [ + (r"shm-a-sre-b", "shmasrebsecrets"), + (r"shm-verylongshmname-sre-verylongsrename", "shmverylsreverylosecrets"), + (r"a-long-string-with-lots-of-tokens", "alostrwitlotoftoksecrets"), + ], +) +def test_get_key_vault_name(value, expected): + assert get_key_vault_name(value) == expected + + +@pytest.mark.parametrize( + "value,expected", + [(r"Test SRE", "testsre"), (r"%*aBc", "abc"), (r"MY_SRE", "mysre")], +) +def test_json_safe(value, expected): + assert json_safe(value) == expected diff --git a/tests/infrastructure/common/test_ip_ranges.py b/tests/infrastructure/common/test_ip_ranges.py new file mode 100644 index 0000000000..3dab535193 --- /dev/null +++ b/tests/infrastructure/common/test_ip_ranges.py @@ -0,0 +1,52 @@ +from data_safe_haven.external import AzureIPv4Range +from data_safe_haven.infrastructure.common import SREDnsIpRanges, SREIpRanges + + +class TestSREIpRanges: + + def test_vnet_and_subnets(self): + assert SREIpRanges.vnet == AzureIPv4Range("10.0.0.0", "10.0.255.255") + assert SREIpRanges.application_gateway == AzureIPv4Range( + "10.0.0.0", "10.0.0.255" + ) + assert SREIpRanges.apt_proxy_server == AzureIPv4Range("10.0.1.0", "10.0.1.7") + assert SREIpRanges.clamav_mirror == AzureIPv4Range("10.0.1.8", "10.0.1.15") + assert SREIpRanges.data_configuration == AzureIPv4Range( + "10.0.1.16", "10.0.1.23" + ) + assert SREIpRanges.data_desired_state == AzureIPv4Range( + "10.0.1.24", "10.0.1.31" + ) + assert SREIpRanges.data_private == AzureIPv4Range("10.0.1.32", "10.0.1.39") + assert SREIpRanges.firewall == AzureIPv4Range("10.0.1.64", "10.0.1.127") + assert SREIpRanges.firewall_management == AzureIPv4Range( + "10.0.1.128", "10.0.1.191" + ) + assert SREIpRanges.guacamole_containers == AzureIPv4Range( + "10.0.1.40", "10.0.1.47" + ) + assert SREIpRanges.guacamole_containers_support == AzureIPv4Range( + "10.0.1.48", "10.0.1.55" + ) + assert SREIpRanges.identity_containers == AzureIPv4Range( + "10.0.1.56", "10.0.1.63" + ) + assert SREIpRanges.monitoring == AzureIPv4Range("10.0.1.192", "10.0.1.223") + assert SREIpRanges.user_services_containers == AzureIPv4Range( + "10.0.1.224", "10.0.1.231" + ) + assert SREIpRanges.user_services_containers_support == 
AzureIPv4Range( + "10.0.1.232", "10.0.1.239" + ) + assert SREIpRanges.user_services_databases == AzureIPv4Range( + "10.0.1.240", "10.0.1.247" + ) + assert SREIpRanges.user_services_software_repositories == AzureIPv4Range( + "10.0.1.248", "10.0.1.255" + ) + assert SREIpRanges.workspaces == AzureIPv4Range("10.0.2.0", "10.0.2.255") + + +class TestSREDnsIpRanges: + def test_vnet(self): + assert SREDnsIpRanges.vnet == AzureIPv4Range("192.168.0.0", "192.168.0.7") diff --git a/tests/infrastructure/programs/resource_assertions.py b/tests/infrastructure/programs/resource_assertions.py new file mode 100644 index 0000000000..a02a499070 --- /dev/null +++ b/tests/infrastructure/programs/resource_assertions.py @@ -0,0 +1,17 @@ +"""Functions for testing Pulumi resources inside an apply loop""" + +import json + + +def assert_equal(target, source): + try: + assert source == target + except AssertionError as exc: + msg = f"'{source}' {type(source)} and '{target}' {type(target)} are not equal." + raise ValueError(msg) from exc + + +def assert_equal_json(target, source): + json_source = json.dumps(source, sort_keys=True) + json_target = json.dumps(target, sort_keys=True) + assert_equal(json_source, json_target) diff --git a/tests/infrastructure/programs/sre/conftest.py b/tests/infrastructure/programs/sre/conftest.py new file mode 100644 index 0000000000..efcbe0c921 --- /dev/null +++ b/tests/infrastructure/programs/sre/conftest.py @@ -0,0 +1,77 @@ +from pulumi_azure_native import managedidentity, network, resources +from pytest import fixture + +from data_safe_haven.infrastructure.common import SREIpRanges + + +# +# Constants +# +@fixture +def location() -> str: + return "uksouth" + + +@fixture +def resource_group_name() -> str: + return "rg-example" + + +@fixture +def resource_group(location, resource_group_name) -> resources.ResourceGroup: + return resources.ResourceGroup( + "resource_group", + location=location, + resource_group_name=resource_group_name, + ) + + +@fixture +def sre_fqdn() -> str: + return "sre.example.com" + + +@fixture +def sre_index() -> int: + return 1 + + +@fixture +def stack_name() -> str: + return "stack-example" + + +@fixture +def tags() -> dict[str, str]: + return {"key": "value"} + + +# +# Pulumi resources +# +@fixture +def identity_key_vault_reader( + location, resource_group_name, stack_name +) -> managedidentity.UserAssignedIdentity: + return managedidentity.UserAssignedIdentity( + "identity_key_vault_reader", + location=location, + resource_group_name=resource_group_name, + resource_name_=f"{stack_name}-id-key-vault-reader", + ) + + +@fixture +def subnet_application_gateway() -> network.GetSubnetResult: + return network.GetSubnetResult( + address_prefix=SREIpRanges.application_gateway.prefix, + id="subnet_application_gateway_id", + ) + + +@fixture +def subnet_guacamole_containers() -> network.GetSubnetResult: + return network.GetSubnetResult( + address_prefix=SREIpRanges.guacamole_containers.prefix, + id="subnet_guacamole_containers_id", + ) diff --git a/tests/infrastructure/programs/sre/test_application_gateway.py b/tests/infrastructure/programs/sre/test_application_gateway.py new file mode 100644 index 0000000000..f0085a69a4 --- /dev/null +++ b/tests/infrastructure/programs/sre/test_application_gateway.py @@ -0,0 +1,677 @@ +from functools import partial + +import pulumi +import pytest +from pulumi_azure_native import network + +from data_safe_haven.infrastructure.programs.sre.application_gateway import ( + SREApplicationGatewayComponent, + SREApplicationGatewayProps, +) + 
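+# The helpers imported below re-raise AssertionError as ValueError with both the
+# expected and actual values in the message; plain asserts in a shared helper
+# module are not rewritten by pytest, so they would otherwise fail without detail.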
+from ..resource_assertions import assert_equal, assert_equal_json + + +@pytest.fixture +def application_gateway_props( + identity_key_vault_reader, + location, + resource_group, + sre_fqdn, + subnet_application_gateway, + subnet_guacamole_containers, +) -> SREApplicationGatewayProps: + return SREApplicationGatewayProps( + key_vault_certificate_id="key_vault_certificate_id", + key_vault_identity=identity_key_vault_reader, + location=location, + resource_group=resource_group, + sre_fqdn=sre_fqdn, + subnet_application_gateway=subnet_application_gateway, + subnet_guacamole_containers=subnet_guacamole_containers, + ) + + +@pytest.fixture +def application_gateway_component( + application_gateway_props, + stack_name, + tags, +) -> SREApplicationGatewayComponent: + return SREApplicationGatewayComponent( + name="ag-name", + stack_name=stack_name, + props=application_gateway_props, + tags=tags, + ) + + +class TestSREApplicationGatewayProps: + @pulumi.runtime.test + def test_props(self, application_gateway_props: SREApplicationGatewayProps): + assert isinstance(application_gateway_props, SREApplicationGatewayProps) + + @pulumi.runtime.test + def test_props_key_vault_certificate_id( + self, application_gateway_props: SREApplicationGatewayProps + ): + pulumi.Output.from_input( + application_gateway_props.key_vault_certificate_id + ).apply( + partial(assert_equal, "key_vault_certificate_id"), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_props_resource_group_id( + self, application_gateway_props: SREApplicationGatewayProps + ): + application_gateway_props.resource_group_id.apply( + partial(assert_equal, pulumi.UNKNOWN), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_props_resource_group_name( + self, application_gateway_props: SREApplicationGatewayProps + ): + application_gateway_props.resource_group_name.apply( + partial(assert_equal, "None"), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_props_sre_fqdn( + self, application_gateway_props: SREApplicationGatewayProps, sre_fqdn + ): + pulumi.Output.from_input(application_gateway_props.sre_fqdn).apply( + partial(assert_equal, sre_fqdn), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_props_subnet_application_gateway_id( + self, application_gateway_props: SREApplicationGatewayProps + ): + application_gateway_props.subnet_application_gateway_id.apply( + partial(assert_equal, "subnet_application_gateway_id"), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_props_subnet_guacamole_containers_ip_addresses( + self, application_gateway_props: SREApplicationGatewayProps + ): + application_gateway_props.subnet_guacamole_containers_ip_addresses.apply( + partial(assert_equal, ["10.0.1.44", "10.0.1.45", "10.0.1.46"]), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_props_user_assigned_identities( + self, application_gateway_props: SREApplicationGatewayProps + ): + application_gateway_props.user_assigned_identities.apply( + partial(assert_equal, pulumi.UNKNOWN), + run_with_unknowns=True, + ) + + +class TestSREApplicationGatewayComponent: + @pulumi.runtime.test + def test_application_gateway( + self, application_gateway_component: SREApplicationGatewayComponent + ): + assert isinstance( + application_gateway_component.application_gateway, + network.ApplicationGateway, + ) + + @pulumi.runtime.test + def test_application_gateway_authentication_certificates( + self, application_gateway_component: SREApplicationGatewayComponent + ): + 
application_gateway_component.application_gateway.authentication_certificates.apply( + partial(assert_equal, None), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_autoscale_configuration( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.autoscale_configuration.apply( + partial(assert_equal, None), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_backend_address_pools( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.backend_address_pools.apply( + partial( + assert_equal_json, + [ + { + "backend_ip_configurations": None, + "etag": None, + "provisioning_state": None, + "type": None, + "backend_addresses": [ + {"ip_address": "10.0.1.44"}, + {"ip_address": "10.0.1.45"}, + {"ip_address": "10.0.1.46"}, + ], + "name": "appGatewayBackendGuacamole", + } + ], + ), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_backend_http_settings_collection( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.backend_http_settings_collection.apply( + partial( + assert_equal_json, + [ + { + "etag": None, + "provisioning_state": None, + "type": None, + "cookie_based_affinity": "Enabled", + "name": "appGatewayBackendHttpSettings", + "port": 80, + "protocol": "Http", + "request_timeout": 30, + } + ], + ), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_backend_settings_collection( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.backend_settings_collection.apply( + partial(assert_equal, None), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_custom_error_configurations( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.custom_error_configurations.apply( + partial(assert_equal, None), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_default_predefined_ssl_policy( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.default_predefined_ssl_policy.apply( + partial(assert_equal, None), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_enable_fips( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.enable_fips.apply( + partial(assert_equal, None), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_enable_http2( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.enable_http2.apply( + partial(assert_equal, None), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_etag( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.etag.apply( + partial(assert_equal, None), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_firewall_policy( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.firewall_policy.apply( + 
partial(assert_equal, None), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_force_firewall_policy_association( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.force_firewall_policy_association.apply( + partial(assert_equal, None), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_frontend_ip_configurations( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.frontend_ip_configurations.apply( + partial( + assert_equal_json, + [ + { + "etag": None, + "provisioning_state": None, + "type": None, + "name": "appGatewayFrontendIP", + "private_ip_allocation_method": "Dynamic", + "public_ip_address": {"id": None}, + } + ], + ), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_frontend_ports( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.frontend_ports.apply( + partial( + assert_equal_json, + [ + { + "etag": None, + "provisioning_state": None, + "type": None, + "name": "appGatewayFrontendHttp", + "port": 80, + }, + { + "etag": None, + "provisioning_state": None, + "type": None, + "name": "appGatewayFrontendHttps", + "port": 443, + }, + ], + ), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_gateway_ip_configurations( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.gateway_ip_configurations.apply( + partial( + assert_equal_json, + [ + { + "etag": None, + "provisioning_state": None, + "type": None, + "name": "appGatewayIP", + "subnet": {"id": "subnet_application_gateway_id"}, + } + ], + ), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_global_configuration( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.global_configuration.apply( + partial(assert_equal, None), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_http_listeners( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.http_listeners.apply( + partial( + assert_equal_json, + [ + { + "etag": None, + "provisioning_state": None, + "type": None, + "frontend_ip_configuration": {"id": None}, + "frontend_port": {"id": None}, + "host_name": "sre.example.com", + "name": "GuacamoleHttpListener", + "protocol": "Http", + }, + { + "etag": None, + "provisioning_state": None, + "type": None, + "frontend_ip_configuration": {"id": None}, + "frontend_port": {"id": None}, + "host_name": "sre.example.com", + "name": "GuacamoleHttpsListener", + "protocol": "Https", + "ssl_certificate": {"id": None}, + }, + ], + ), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_identity( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.identity.apply( + partial( + assert_equal_json, + {"principal_id": None, "tenant_id": None, "type": "UserAssigned"}, + ), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_listeners( + self, application_gateway_component: SREApplicationGatewayComponent + ): + 
application_gateway_component.application_gateway.listeners.apply( + partial(assert_equal, None), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_load_distribution_policies( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.load_distribution_policies.apply( + partial(assert_equal, None), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_location( + self, + application_gateway_component: SREApplicationGatewayComponent, + location: str, + ): + application_gateway_component.application_gateway.location.apply( + partial(assert_equal, location), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_name( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.name.apply( + partial(assert_equal, None), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_operational_state( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.operational_state.apply( + partial(assert_equal, None), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_private_endpoint_connections( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.private_endpoint_connections.apply( + partial(assert_equal, None), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_private_link_configurations( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.private_link_configurations.apply( + partial(assert_equal, None), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_probes( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.probes.apply( + partial(assert_equal, None), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_provisioning_state( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.provisioning_state.apply( + partial(assert_equal, None), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_redirect_configurations( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.redirect_configurations.apply( + partial( + assert_equal_json, + [ + { + "etag": None, + "type": None, + "include_path": True, + "include_query_string": True, + "name": "GuacamoleHttpToHttpsRedirection", + "redirect_type": "Permanent", + "request_routing_rules": [{"id": None}], + "target_listener": {"id": None}, + } + ], + ), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_request_routing_rules( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.request_routing_rules.apply( + partial( + assert_equal_json, + [ + { + "etag": None, + "provisioning_state": None, + "type": None, + "http_listener": {"id": None}, + "name": "GuacamoleHttpRouting", + "priority": 200, + "redirect_configuration": {"id": None}, + "rule_type": "Basic", + }, + { + "etag": None, + 
"provisioning_state": None, + "type": None, + "backend_address_pool": {"id": None}, + "backend_http_settings": {"id": None}, + "http_listener": {"id": None}, + "name": "GuacamoleHttpsRouting", + "priority": 100, + "rule_type": "Basic", + }, + ], + ), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_resource_guid( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.resource_guid.apply( + partial(assert_equal, None), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_rewrite_rule_sets( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.rewrite_rule_sets.apply( + partial(assert_equal, None), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_routing_rules( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.routing_rules.apply( + partial(assert_equal, None), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_sku( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.sku.apply( + partial( + assert_equal, + network.outputs.ApplicationGatewaySkuResponse( + capacity=1, + name="Standard_v2", + tier="Standard_v2", + ), + ), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_ssl_certificates( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.ssl_certificates.apply( + partial( + assert_equal_json, + [ + { + "etag": None, + "provisioning_state": None, + "public_cert_data": None, + "type": None, + "key_vault_secret_id": "key_vault_certificate_id", + "name": "letsencryptcertificate", + } + ], + ), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_ssl_policy( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.ssl_policy.apply( + partial( + assert_equal_json, + { + "cipher_suites": [ + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + ], + "min_protocol_version": "TLSv1_2", + "policy_type": "CustomV2", + }, + ), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_ssl_profiles( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.ssl_profiles.apply( + partial(assert_equal, None), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_tags( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.tags.apply( + partial(assert_equal, {"key": "value", "component": "application gateway"}), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_trusted_client_certificates( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.trusted_client_certificates.apply( + partial(assert_equal, None), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_type( + self, application_gateway_component: 
SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.type.apply( + partial(assert_equal, None), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_url_path_maps( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.url_path_maps.apply( + partial(assert_equal, None), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_web_application_firewall_configuration( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.web_application_firewall_configuration.apply( + partial(assert_equal, None), + run_with_unknowns=True, + ) + + @pulumi.runtime.test + def test_application_gateway_zones( + self, application_gateway_component: SREApplicationGatewayComponent + ): + application_gateway_component.application_gateway.zones.apply( + partial(assert_equal, None), + run_with_unknowns=True, + ) diff --git a/tests/infrastructure/programs/sre/test_workspaces.py b/tests/infrastructure/programs/sre/test_workspaces.py new file mode 100644 index 0000000000..108ce162d0 --- /dev/null +++ b/tests/infrastructure/programs/sre/test_workspaces.py @@ -0,0 +1,15 @@ +from data_safe_haven.infrastructure.programs.sre.workspaces import ( + SREWorkspacesComponent, +) + + +class TestTemplateCloudInit: + def test_template_cloudinit(self): + cloudinit = SREWorkspacesComponent.template_cloudinit( + storage_account_data_desired_state_name="storageaccount", + ) + + assert ( + '- ["storageaccount.blob.core.windows.net:/storageaccount/desiredstate", /desired_state, nfs, "ro,' + in cloudinit + ) diff --git a/tests/infrastructure/test_project_manager.py b/tests/infrastructure/test_project_manager.py new file mode 100644 index 0000000000..259c5f1b37 --- /dev/null +++ b/tests/infrastructure/test_project_manager.py @@ -0,0 +1,147 @@ +from pulumi.automation import ( + LocalWorkspace, + ProjectSettings, + Stack, + StackSettings, +) +from pytest import raises + +from data_safe_haven.config import DSHPulumiProject +from data_safe_haven.exceptions import ( + DataSafeHavenConfigError, + DataSafeHavenPulumiError, +) +from data_safe_haven.infrastructure import SREProjectManager +from data_safe_haven.infrastructure.project_manager import ProjectManager + + +class TestSREProjectManager: + def test_constructor( + self, + context_no_secrets, + sre_config, + pulumi_config_no_key, + pulumi_project_sandbox, + ): + sre = SREProjectManager( + context_no_secrets, + sre_config, + pulumi_config_no_key, + ) + assert isinstance(sre, SREProjectManager) + assert isinstance(sre, ProjectManager) + assert sre.context == context_no_secrets + assert sre.pulumi_project == pulumi_project_sandbox + + def test_cleanup( + self, + capsys, + mock_azuresdk_blob_exists, # noqa: ARG002 + mock_azuresdk_purge_keyvault, # noqa: ARG002 + mock_azuresdk_remove_blob, # noqa: ARG002 + sre_project_manager, + ): + sre_project_manager.cleanup() + stdout, _ = capsys.readouterr() + assert ( + "Removed Pulumi stack backup shm-acmedeployment-sre-sandbox.json.bak." + in stdout + ) + assert "Purged Azure Key Vault shmacmedsresandbosecrets." 
in stdout + + def test_new_project( + self, + context_no_secrets, + sre_config, + pulumi_config_empty, + ): + sre = SREProjectManager( + context_no_secrets, + sre_config, + pulumi_config_empty, + create_project=True, + ) + assert isinstance(sre, SREProjectManager) + assert isinstance(sre, ProjectManager) + assert sre.context == context_no_secrets + # Ensure a project was created + assert isinstance(sre.pulumi_project, DSHPulumiProject) + assert "sandbox" in pulumi_config_empty.project_names + assert pulumi_config_empty["sandbox"].stack_config == {} + assert pulumi_config_empty.encrypted_key is None + + def test_new_project_fail( + self, + context_no_secrets, + sre_config, + pulumi_config_empty, + ): + sre = SREProjectManager( + context_no_secrets, sre_config, pulumi_config_empty, create_project=False + ) + with raises( + DataSafeHavenConfigError, + match="No SRE named sandbox is defined.", + ): + _ = sre.pulumi_project + + def test_project_settings(self, sre_project_manager): + project_settings = sre_project_manager.project_settings + assert isinstance(project_settings, ProjectSettings) + assert project_settings.name == "data-safe-haven" + assert project_settings.runtime == "python" + assert project_settings.backend is None + + def test_stack_settings(self, sre_project_manager): + stack_settings = sre_project_manager.stack_settings + assert isinstance(stack_settings, StackSettings) + assert stack_settings.config == sre_project_manager.pulumi_project.stack_config + assert ( + stack_settings.encrypted_key + == sre_project_manager.pulumi_config.encrypted_key + ) + assert ( + stack_settings.secrets_provider + == sre_project_manager.context.pulumi_secrets_provider_url + ) + + def test_pulumi_project(self, sre_project_manager, pulumi_project_sandbox): + assert sre_project_manager.pulumi_project == pulumi_project_sandbox + + def test_run_pulumi_command(self, sre_project_manager): + stdout = sre_project_manager.run_pulumi_command("stack ls") + assert "shm-acmedeployment-sre-sandbox*" in stdout + + def test_run_pulumi_command_command_error(self, sre_project_manager): + with raises( + DataSafeHavenPulumiError, + match="Failed to run command.", + ): + sre_project_manager.run_pulumi_command("notapulumicommand") + + def test_stack(self, sre_project_manager): + stack = sre_project_manager.stack + assert isinstance(stack, Stack) + + def test_stack_config(self, sre_project_manager): + stack = sre_project_manager.stack + assert stack.name == "shm-acmedeployment-sre-sandbox" + assert isinstance(stack.workspace, LocalWorkspace) + workspace = stack.workspace + assert ( + workspace.secrets_provider + == sre_project_manager.context.pulumi_secrets_provider_url + ) + config = stack.get_all_config() + assert config["azure-native:location"].value == "uksouth" + assert config["data-safe-haven:variable"].value == "8" + + def test_update_dsh_pulumi_project(self, sre_project_manager): + sre_project_manager.set_config("new-key", "hello", secret=False) + config = sre_project_manager.stack.get_all_config() + assert "data-safe-haven:new-key" in config + assert config.get("data-safe-haven:new-key").value == "hello" + sre_project_manager.update_dsh_pulumi_project() + stack_config = sre_project_manager.pulumi_project.stack_config + assert "data-safe-haven:new-key" in stack_config + assert stack_config.get("data-safe-haven:new-key") == "hello" diff --git a/tests/logging/test_logger.py b/tests/logging/test_logger.py new file mode 100644 index 0000000000..f5cf8e9400 --- /dev/null +++ b/tests/logging/test_logger.py @@ -0,0 +1,101 
@@ +import logging +from datetime import datetime +from pathlib import Path + +from rich.logging import RichHandler + +from data_safe_haven.logging.logger import ( + PlainFileHandler, + from_ansi, + get_console_handler, + get_logger, + logfile_name, + set_console_level, + show_console_level, +) + + +class TestFromAnsi: + def test_from_ansi(self, capsys): + logger = get_logger() + from_ansi(logger, "\033[31;1;4mHello\033[0m") + out, _ = capsys.readouterr() + assert "Hello" in out + assert r"\033" not in out + + +class TestLogFileName: + def test_logfile_name(self): + name = logfile_name() + assert name.endswith(".log") + date = name.split(".")[0] + assert datetime.strptime(date, "%Y-%m-%d") # noqa: DTZ007 + + +class TestGetConsoleHandler: + def test_get_console_handler(self): + handler = get_console_handler() + assert isinstance(handler, RichHandler) + assert handler.level == logging.INFO + + +class TestGetLogger: + def test_get_logger(self): + logger = get_logger() + assert isinstance(logger, logging.Logger) + assert logger.name == "data_safe_haven" + + +class TestLogger: + def test_constructor(self, log_directory): + logger = get_logger() + file_handler = next( + h for h in logger.handlers if isinstance(h, PlainFileHandler) + ) + + assert file_handler + assert file_handler.level == logging.NOTSET + assert file_handler.baseFilename == f"{log_directory}/test.log" + + def test_info(self, capsys): + logger = get_logger() + file_handler = next( + h for h in logger.handlers if isinstance(h, PlainFileHandler) + ) + log_file = Path(file_handler.baseFilename) + + logger.info("hello") + out, _ = capsys.readouterr() + + assert "hello" in out + assert log_file.is_file() + + +class TestSetConsoleLevel: + def test_set_console_level(self): + handler = get_console_handler() + assert handler.level == logging.INFO + set_console_level(logging.DEBUG) + assert handler.level == logging.DEBUG + + def test_set_console_level_stdout(self, capsys): + logger = get_logger() + set_console_level(logging.DEBUG) + logger.debug("hello") + out, _ = capsys.readouterr() + assert "hello" in out + + +class TestShowConsoleLevel: + def test_show_console_level(self): + handler = get_console_handler() + assert not handler._log_render.show_level + show_console_level() + assert handler._log_render.show_level + + def test_show_console_level_stdout(self, capsys): + logger = get_logger() + show_console_level() + logger.info("hello") + out, _ = capsys.readouterr() + assert "INFO" in out diff --git a/tests/logging/test_non_logging_singleton.py b/tests/logging/test_non_logging_singleton.py new file mode 100644 index 0000000000..3394894abc --- /dev/null +++ b/tests/logging/test_non_logging_singleton.py @@ -0,0 +1,36 @@ +import logging + +import pytest + +from data_safe_haven.logging import get_null_logger +from data_safe_haven.logging.non_logging_singleton import NonLoggingSingleton +from data_safe_haven.singleton import Singleton + + +class TestNonLoggingSingleton: + def test_constructor(self): + logger = get_null_logger() + assert isinstance(logger, NonLoggingSingleton) + assert type(logger.__class__) is Singleton + + @pytest.mark.parametrize( + "level", + [ + "debug", + "info", + "warning", + "error", + "critical", + "fatal", + ], + ) + def test_output_is_none(self, level, capsys): + logger = get_null_logger() + getattr(logger, level)("Hello world!") + stdout, stderr = capsys.readouterr() + assert stdout == "" + assert stderr == "" + + def test_default_level(self): + logger = get_null_logger() + assert logger.level > logging.CRITICAL 
diff --git a/tests/logging/test_plain_file_handler.py b/tests/logging/test_plain_file_handler.py new file mode 100644 index 0000000000..a2bf60ad81 --- /dev/null +++ b/tests/logging/test_plain_file_handler.py @@ -0,0 +1,6 @@ +from data_safe_haven.logging.plain_file_handler import PlainFileHandler + + +class TestPlainFileHandler: + def test_strip_formatting(self): + assert PlainFileHandler.strip_formatting("[green]hello[/]") == "hello" diff --git a/tests/pester/CodeStyle.Tests.ps1 b/tests/pester/CodeStyle.Tests.ps1 deleted file mode 100644 index 0981c98f36..0000000000 --- a/tests/pester/CodeStyle.Tests.ps1 +++ /dev/null @@ -1,28 +0,0 @@ -# Formatter settings -# ------------------ -$FileExtensions = @("*.ps1", "*.psm1", "*.psd1") -$CodeDirectories = @("deployment", "tests") -$FileDetails = @() -foreach ($CodeDirectory in $CodeDirectories) { - $CodeDirectoryPath = Join-Path -Path (Get-Item $PSScriptRoot).Parent.Parent -ChildPath $CodeDirectory - $FileDetails += @(Get-ChildItem -Path $CodeDirectoryPath -Include $FileExtensions -Recurse | Where-Object { ($_.FullName -notmatch "shm-dc1-setup-scripts") -and ($_ -notmatch "shm-dc2-setup-scripts") } | ForEach-Object { @{"FilePath" = $_.FullName; "FileName" = $_.Name } }) -} - -BeforeAll { - Import-Module PSScriptAnalyzer -ErrorAction Stop -} - -# Run Invoke-Formatter on all files -# --------------------------------- -Describe "Powershell formatting" { - BeforeAll { - $SettingsPath = Join-Path -Path (Get-Item $PSScriptRoot).Parent.Parent -ChildPath ".PSScriptFormatterSettings.psd1" - } - It "Checks that '<FileName>' is correctly formatted" -TestCases $FileDetails { - param ($FileName, $FilePath) - $Unformatted = Get-Content -Path $FilePath -Raw - $Formatted = Invoke-Formatter -ScriptDefinition $Unformatted -Settings $SettingsPath - $Diff = Compare-Object -ReferenceObject $Unformatted.Split("`n") -DifferenceObject $Formatted.Split("`n") - $Diff | Out-String | Should -BeNullOrEmpty - } -} diff --git a/tests/pester/ConfigurationFiles.Tests.ps1 b/tests/pester/ConfigurationFiles.Tests.ps1 deleted file mode 100644 index 2cefdaf178..0000000000 --- a/tests/pester/ConfigurationFiles.Tests.ps1 +++ /dev/null @@ -1,52 +0,0 @@ -# Formatter settings -# ------------------ -$FileExtensions = @("*.json") -$ReferenceConfigFilePath = Join-Path -Path (Get-Item $PSScriptRoot).Parent -ChildPath "resources" -$ShmIds = Get-ChildItem -Path $ReferenceConfigFilePath | ForEach-Object { $_.Name } | Where-Object { $_ -like "shm_*" } | ForEach-Object { $_.Split("_")[1] } -$ConfigFileDetails = @(Get-ChildItem -Path $ReferenceConfigFilePath -Include $FileExtensions -Recurse | ForEach-Object { @{"FilePath" = $_.FullName; "FileName" = $_.Name; "ConfigType" = $_.Name.Split("_")[0]; "ConfigId" = $_.Name.Split("_")[1]; "ShmIds" = $ShmIds } }) - -BeforeAll { - Import-Module $PSScriptRoot/../../deployment/common/Configuration -Force -ErrorAction Stop - Import-Module $PSScriptRoot/../../deployment/common/DataStructures -Force -ErrorAction Stop -} - -Describe "SHM configuration file check" { - It "Checks that SHM config '<ConfigId>' expands to give the reference: '<FileName>'" -TestCases ($ConfigFileDetails | Where-Object { $_.ConfigType -eq "shm" }) { - param ($FileName, $FilePath, $ConfigType, $ConfigId) - - # Load reference config and convert it to a sorted hashtable - $referenceConfig = Get-Content -Path $FilePath -Raw -ErrorAction Stop | ConvertFrom-Json -AsHashtable | ConvertTo-SortedHashtable - - # Load test config - Mock Write-Information {} # we mock Write-Information here as we expect output from the 
`Get-SreConfig` call - $testConfig = Get-ShmConfig -shmId $ConfigId | ConvertTo-SortedHashtable - - # Compare the two configs as JSON strings - # Note that we could use `Test-Equality` from the `Functional` module here, but that would not tell us *where* any differences are - $Diff = Compare-Object -ReferenceObject $($referenceConfig | ConvertTo-Json -Depth 99).Split("`n") -DifferenceObject $($testConfig | ConvertTo-Json -Depth 99).Split("`n") - $Diff | Out-String | Should -BeNullOrEmpty - } -} - -Describe "SRE configuration file check" { - BeforeAll { - } - It "Checks that SRE config '<ConfigId>' expands to give the reference: '<FileName>'" -TestCases ($ConfigFileDetails | Where-Object { $_.ConfigType -eq "sre" }) { - param ($FileName, $FilePath, $ConfigType, $ConfigId, $ShmIds) - - # Load reference config and convert it to a sorted hashtable - $referenceConfig = Get-Content -Path $FilePath -Raw -ErrorAction Stop | ConvertFrom-Json -AsHashtable | ConvertTo-SortedHashtable - - # Split the ConfigId into shmId and sreId by matching to the list of known shmIds - $shmId = $ShmIds | Where-Object { $ConfigId.Split($_)[0] -ne $ConfigId } | Select-Object -First 1 - $sreId = $ConfigId.Split($shmId)[1] - # Load test config - Mock Write-Information {} # we mock Write-Information here as we expect output from the `Get-SreConfig` call - $testConfig = Get-SreConfig -shmId $shmId -sreId $sreId - - # Compare the two configs as JSON strings - # Note that we could use `Test-Equality` from the `Functional` module here, but that would not tell us *where* any differences are - $Diff = Compare-Object -ReferenceObject $($referenceConfig | ConvertTo-Json -Depth 99).Split("`n") -DifferenceObject $($testConfig | ConvertTo-Json -Depth 99).Split("`n") - $Diff | Out-String | Should -BeNullOrEmpty - } -} diff --git a/tests/pester/DataStructures.Tests.ps1 b/tests/pester/DataStructures.Tests.ps1 deleted file mode 100644 index 519c1269fc..0000000000 --- a/tests/pester/DataStructures.Tests.ps1 +++ /dev/null @@ -1,72 +0,0 @@ -BeforeAll { - Import-Module $PSScriptRoot/../../deployment/common/DataStructures -Force -ErrorAction Stop -} - -# Test ConvertTo-SortedHashtable -Describe "Test ConvertTo-SortedHashtable" { - It "Returns True if ordered hashtable is correctly sorted" { - $unsorted = [ordered]@{ - b = "B" - a = [ordered]@{ - d = "D" - c = 17 - } - } - $sorted = ConvertTo-SortedHashtable -Sortable $unsorted - $sorted[0][0] | Should -Be 17 - $sorted[0][1] | Should -Be "D" - $sorted[1] | Should -Be "B" - } -} - - -# Test Copy-HashtableOverrides -Describe "Test Copy-HashtableOverrides" { - It "Returns True overrides are applied to selected keys while leaving others untouched" { - $target = [ordered]@{ - a = [ordered]@{ - c = 17 - d = "D" - } - b = "B" - } - $overrides = [ordered]@{ a = [ordered]@{ d = "16" } } - $null = Copy-HashtableOverrides -Source $overrides -Target $target - $target["a"]["c"] | Should -Be 17 - $target["a"]["d"] | Should -Be "16" - $target["b"] | Should -Be "B" - } -} - - -# Test Find-AllMatchingKeys -Describe "Test Find-AllMatchingKeys" { - It "Returns True if all matching keys are found" { - $target = [ordered]@{ - a = [ordered]@{ - c = 17 - d = 1 - } - b = [ordered]@{ - d = "3" - } - d = "2" - } - Find-AllMatchingKeys -Hashtable $target -Key "d" | Should -Be @(1, "3", "2") - } -} - - -# Test Limit-StringLength -Describe "Test Limit-StringLength MaximumLength" { - It "Returns True if string length is correctly limited" { - "abcdefghijklm" | Limit-StringLength -Silent -MaximumLength 6 | Should -Be "abcdef" - } -} 
-Describe "Test Limit-StringLength FailureIsFatal" { - It "Should throw an exception since the string is too long" { - Mock Write-Information -Module Logging {} # we mock Write-Information here as we expect output from the exception - { "abcdefghijklm" | Limit-StringLength -FailureIsFatal -MaximumLength 6 } | Should -Throw "'abcdefghijklm' has length 13 but must not exceed 6!" - Assert-MockCalled -Module Logging Write-Information -Exactly 1 -Scope It - } -} diff --git a/tests/resources/shm_blue_full_config.json b/tests/resources/shm_blue_full_config.json deleted file mode 100644 index 990482159c..0000000000 --- a/tests/resources/shm_blue_full_config.json +++ /dev/null @@ -1,1052 +0,0 @@ -{ - "azureAdminGroupName": "Safe Haven Test Admins", - "azureAdTenantId": "1d93a235-0c16-4f39-adff-a550719bdb49", - "dc": { - "adDirectory": "C:\\ActiveDirectory", - "disks": { - "os": { - "sizeGb": "128", - "type": "Standard_LRS" - } - }, - "external_dns_resolver": "168.63.129.16", - "fqdn": "dc1-shm-blue.blue.develop.turingsafehaven.ac.uk", - "hostname": "DC1-SHM-BLUE", - "hostnameLower": "dc1-shm-blue", - "hostnameUpper": "DC1-SHM-BLUE", - "installationDirectory": "C:\\Installation", - "ip": "10.0.0.4", - "rg": "RG_SHM_BLUE_DC", - "safemodePasswordSecretName": "shm-blue-vm-safemode-password-dc", - "vmName": "DC1-SHM-BLUE", - "vmSize": "Standard_D2s_v3" - }, - "dcb": { - "fqdn": "dc2-shm-blue.blue.develop.turingsafehaven.ac.uk", - "hostname": "DC2-SHM-BLUE", - "ip": "10.0.0.5", - "vmName": "DC2-SHM-BLUE" - }, - "diskTypeDefault": "Standard_LRS", - "dns": { - "rg": "RG_SHM_DNS_TEST", - "subscriptionName": "[Prod] Safe Haven Domains" - }, - "dockerAccount": "youraccountname", - "dockerPassword": "dckr_pat_0o0o0o0o0o0o0o0o0o0o0o0o0o0", - "domain": { - "dn": "DC=blue,DC=develop,DC=turingsafehaven,DC=ac,DC=uk", - "fqdn": "blue.develop.turingsafehaven.ac.uk", - "fqdnLower": "blue.develop.turingsafehaven.ac.uk", - "fqdnUpper": "BLUE.DEVELOP.TURINGSAFEHAVEN.AC.UK", - "netbiosName": "BLUE", - "ous": { - "databaseServers": { - "name": "Secure Research Environment Database Servers", - "path": "OU=Secure Research Environment Database Servers,DC=blue,DC=develop,DC=turingsafehaven,DC=ac,DC=uk" - }, - "identityServers": { - "name": "Safe Haven Identity Servers", - "path": "OU=Safe Haven Identity Servers,DC=blue,DC=develop,DC=turingsafehaven,DC=ac,DC=uk" - }, - "linuxServers": { - "name": "Secure Research Environment Linux Servers", - "path": "OU=Secure Research Environment Linux Servers,DC=blue,DC=develop,DC=turingsafehaven,DC=ac,DC=uk" - }, - "researchUsers": { - "name": "Safe Haven Research Users", - "path": "OU=Safe Haven Research Users,DC=blue,DC=develop,DC=turingsafehaven,DC=ac,DC=uk" - }, - "securityGroups": { - "name": "Safe Haven Security Groups", - "path": "OU=Safe Haven Security Groups,DC=blue,DC=develop,DC=turingsafehaven,DC=ac,DC=uk" - }, - "serviceAccounts": { - "name": "Safe Haven Service Accounts", - "path": "OU=Safe Haven Service Accounts,DC=blue,DC=develop,DC=turingsafehaven,DC=ac,DC=uk" - } - }, - "securityGroups": { - "computerManagers": { - "description": "SG Safe Haven Computer Management Users", - "name": "SG Safe Haven Computer Management Users" - }, - "serverAdmins": { - "description": "SG Safe Haven Server Administrators", - "name": "SG Safe Haven Server Administrators" - } - } - }, - "firewall": { - "name": "FIREWALL-SHM-BLUE", - "routeTableName": "ROUTE-TABLE-SHM-BLUE" - }, - "id": "blue", - "keyVault": { - "name": "kv-shm-blue", - "rg": "RG_SHM_BLUE_SECRETS", - "secretNames": { - 
"aadEmergencyAdminPassword": "shm-blue-aad-emergency-admin-password", - "aadEmergencyAdminUsername": "shm-blue-aad-emergency-admin-username", - "buildImageAdminPassword": "shm-blue-buildimage-admin-password", - "buildImageAdminUsername": "shm-blue-buildimage-admin-username", - "domainAdminPassword": "shm-blue-domain-admin-password", - "domainAdminUsername": "shm-blue-domain-admin-username", - "vmAdminUsername": "shm-blue-vm-admin-username", - "vpnCaCertificate": "shm-blue-vpn-ca-cert", - "vpnCaCertificatePlain": "shm-blue-vpn-ca-cert-plain", - "vpnCaCertPassword": "shm-blue-vpn-ca-cert-password", - "vpnClientCertificate": "shm-blue-vpn-client-cert", - "vpnClientCertPassword": "shm-blue-vpn-client-cert-password" - } - }, - "location": "uksouth", - "monitoring": { - "automationAccount": { - "name": "shm-blue-automation" - }, - "loggingWorkspace": { - "name": "shm-blue-loganalytics" - }, - "privatelink": { - "name": "shm-blue-privatelinkscope" - }, - "rg": "RG_SHM_BLUE_MONITORING", - "updateServers": { - "externalIpAddresses": { - "linux": [ - "72.32.157.246", - "87.238.57.227", - "147.75.85.69", - "217.196.149.55", - "91.189.88.0/21", - "185.125.188.0/22", - "103.21.244.0/22", - "103.22.200.0/22", - "103.31.4.0/22", - "104.16.0.0/13", - "104.24.0.0/14", - "108.162.192.0/18", - "131.0.72.0/22", - "141.101.64.0/18", - "162.158.0.0/15", - "172.64.0.0/13", - "173.245.48.0/20", - "188.114.96.0/20", - "190.93.240.0/20", - "197.234.240.0/22", - "198.41.128.0/17", - "3.10.17.128/25", - "3.101.158.0/23", - "3.11.53.0/24", - "3.128.93.0/24", - "3.134.215.0/24", - "3.231.2.0/25", - "3.234.232.224/27", - "3.236.169.192/26", - "3.236.48.0/23", - "3.35.130.128/25", - "13.113.196.64/26", - "13.113.203.0/24", - "13.124.199.0/24", - "13.210.67.128/26", - "13.224.0.0/14", - "13.228.69.0/24", - "13.233.177.192/26", - "13.249.0.0/16", - "13.32.0.0/15", - "13.35.0.0/16", - "13.48.32.0/24", - "13.54.63.128/26", - "13.59.250.0/26", - "15.158.0.0/16", - "15.188.184.0/24", - "15.207.13.128/25", - "15.207.213.128/25", - "18.154.0.0/15", - "18.160.0.0/15", - "18.164.0.0/15", - "18.172.0.0/15", - "18.192.142.0/23", - "18.200.212.0/23", - "18.216.170.128/25", - "18.229.220.192/26", - "18.238.0.0/15", - "18.244.0.0/15", - "18.64.0.0/14", - "18.68.0.0/16", - "34.195.252.0/24", - "34.216.51.0/25", - "34.223.12.224/27", - "34.223.80.192/26", - "34.226.14.0/24", - "35.158.136.0/24", - "35.162.63.192/26", - "35.167.191.128/26", - "36.103.232.0/25", - "36.103.232.128/26", - "44.227.178.0/24", - "44.234.108.128/25", - "44.234.90.252/30", - "52.124.128.0/17", - "52.15.127.128/26", - "52.199.127.192/26", - "52.212.248.0/26", - "52.220.191.0/26", - "52.222.128.0/17", - "52.46.0.0/18", - "52.47.139.0/24", - "52.52.191.128/26", - "52.56.127.0/25", - "52.57.254.0/24", - "52.66.194.128/26", - "52.78.247.128/26", - "52.82.128.0/19", - "52.84.0.0/15", - "54.182.0.0/16", - "54.192.0.0/16", - "54.230.0.0/17", - "54.230.128.0/18", - "54.230.200.0/21", - "54.230.208.0/20", - "54.230.224.0/19", - "54.233.255.128/26", - "54.239.128.0/18", - "54.239.192.0/19", - "54.240.128.0/18", - "58.254.138.0/25", - "58.254.138.128/26", - "64.252.128.0/18", - "64.252.64.0/18", - "65.8.0.0/16", - "65.9.0.0/17", - "65.9.128.0/18", - "70.132.0.0/18", - "71.152.0.0/17", - "99.79.169.0/24", - "99.84.0.0/16", - "99.86.0.0/16", - "108.138.0.0/15", - "108.156.0.0/14", - "116.129.226.0/25", - "116.129.226.128/26", - "118.193.97.128/25", - "118.193.97.64/26", - "119.147.182.0/25", - "119.147.182.128/26", - "120.232.236.0/25", - "120.232.236.128/26", - 
"120.253.240.192/26", - "120.253.241.160/27", - "120.253.245.128/26", - "120.253.245.192/27", - "120.52.12.64/26", - "120.52.153.192/26", - "120.52.22.96/27", - "120.52.39.128/27", - "130.176.0.0/17", - "130.176.128.0/18", - "130.176.192.0/19", - "130.176.224.0/20", - "143.204.0.0/16", - "144.220.0.0/16", - "180.163.57.0/25", - "180.163.57.128/26", - "204.246.164.0/22", - "204.246.168.0/22", - "204.246.172.0/24", - "204.246.173.0/24", - "204.246.174.0/23", - "204.246.176.0/20", - "205.251.200.0/21", - "205.251.208.0/20", - "205.251.249.0/24", - "205.251.250.0/23", - "205.251.252.0/23", - "205.251.254.0/24", - "216.137.32.0/19", - "223.71.11.0/27", - "223.71.71.128/25", - "223.71.71.96/27", - "104.131.190.124", - "152.199.20.126", - "4.128.0.0/12", - "4.144.0.0/12", - "4.160.0.0/12", - "4.176.0.0/12", - "4.192.0.0/12", - "4.208.0.0/12", - "4.224.0.0/12", - "4.240.0.0/12", - "13.64.0.0/11", - "13.96.0.0/13", - "13.104.0.0/14", - "20.0.0.0/11", - "20.33.0.0/16", - "20.34.0.0/15", - "20.36.0.0/14", - "20.40.0.0/13", - "20.48.0.0/12", - "20.64.0.0/10", - "20.128.0.0/16", - "20.130.0.0/16", - "20.135.0.0/16", - "20.136.0.0/16", - "20.140.0.0/15", - "20.143.0.0/16", - "20.144.0.0/14", - "20.150.0.0/15", - "20.152.0.0/16", - "20.153.0.0/16", - "20.157.0.0/16", - "20.158.0.0/15", - "20.160.0.0/12", - "20.176.0.0/14", - "20.180.0.0/14", - "20.184.0.0/13", - "20.192.0.0/10", - "23.96.0.0/13", - "40.64.0.0/10", - "40.162.0.0/16", - "42.159.0.0/16", - "51.4.0.0/15", - "51.8.0.0/16", - "51.10.0.0/15", - "51.12.0.0/15", - "51.18.0.0/16", - "51.51.0.0/16", - "51.53.0.0/16", - "51.103.0.0/16", - "51.104.0.0/15", - "51.107.0.0/16", - "51.116.0.0/16", - "51.120.0.0/16", - "51.124.0.0/16", - "51.132.0.0/16", - "51.136.0.0/15", - "51.138.0.0/16", - "51.140.0.0/14", - "51.144.0.0/15", - "52.96.0.0/12", - "52.112.0.0/14", - "52.120.0.0/14", - "52.125.0.0/16", - "52.126.0.0/15", - "52.130.0.0/15", - "52.132.0.0/14", - "52.136.0.0/13", - "52.145.0.0/16", - "52.146.0.0/15", - "52.148.0.0/14", - "52.152.0.0/13", - "52.160.0.0/11", - "52.224.0.0/11", - "64.4.0.0/18", - "65.52.0.0/14", - "66.119.144.0/20", - "68.18.0.0/15", - "68.154.0.0/15", - "68.210.0.0/15", - "68.218.0.0/15", - "68.220.0.0/15", - "70.37.0.0/17", - "70.37.128.0/18", - "70.152.0.0/15", - "70.156.0.0/15", - "72.144.0.0/14", - "72.152.0.0/14", - "74.160.0.0/14", - "74.176.0.0/14", - "74.224.0.0/14", - "74.234.0.0/15", - "74.240.0.0/14", - "74.248.0.0/15", - "91.190.216.0/21", - "94.245.64.0/18", - "98.64.0.0/14", - "98.70.0.0/15", - "102.37.0.0/16", - "102.133.0.0/16", - "103.9.8.0/22", - "103.25.156.0/24", - "103.25.157.0/24", - "103.25.158.0/23", - "103.36.96.0/22", - "103.255.140.0/22", - "104.40.0.0/13", - "104.146.0.0/15", - "104.208.0.0/13", - "108.140.0.0/14", - "111.221.16.0/20", - "111.221.64.0/18", - "128.94.0.0/16", - "129.75.0.0/16", - "131.107.0.0/16", - "131.253.1.0/24", - "131.253.3.0/24", - "131.253.5.0/24", - "131.253.6.0/24", - "131.253.8.0/24", - "131.253.12.0/22", - "131.253.16.0/23", - "131.253.18.0/24", - "131.253.21.0/24", - "131.253.22.0/23", - "131.253.24.0/21", - "131.253.32.0/20", - "131.253.61.0/24", - "131.253.62.0/23", - "131.253.64.0/18", - "131.253.128.0/17", - "132.164.0.0/16", - "132.245.0.0/16", - "134.170.0.0/16", - "134.177.0.0/16", - "135.130.0.0/16", - "135.149.0.0/16", - "137.116.0.0/15", - "137.135.0.0/16", - "138.91.0.0/16", - "138.105.0.0/16", - "138.196.0.0/16", - "138.239.0.0/16", - "139.217.0.0/16", - "139.219.0.0/16", - "141.251.0.0/16", - "143.64.0.0/16", - "146.147.0.0/16", - "147.145.0.0/16", - 
"147.243.0.0/16", - "148.7.0.0/16", - "150.171.0.0/16", - "150.242.48.0/22", - "155.62.0.0/16", - "157.31.0.0/16", - "157.54.0.0/15", - "157.56.0.0/14", - "157.60.0.0/16", - "158.23.0.0/16", - "158.158.0.0/16", - "159.27.0.0/16", - "159.128.0.0/16", - "163.228.0.0/16", - "167.105.0.0/16", - "167.220.0.0/16", - "168.61.0.0/16", - "168.62.0.0/15", - "169.138.0.0/16", - "170.165.0.0/16", - "172.160.0.0/11", - "172.200.0.0/13", - "172.208.0.0/13", - "191.232.0.0/13", - "192.32.0.0/16", - "192.48.225.0/24", - "192.84.159.0/24", - "192.84.160.0/23", - "192.197.157.0/24", - "192.237.67.0/24", - "193.149.64.0/19", - "193.221.113.0/24", - "194.69.96.0/19", - "194.110.197.0/24", - "195.134.224.0/19", - "198.105.232.0/22", - "198.137.97.0/24", - "198.180.95.0/24", - "198.180.96.0/23", - "198.200.130.0/24", - "198.206.164.0/24", - "199.30.16.0/20", - "199.60.28.0/24", - "199.74.210.0/24", - "199.103.90.0/23", - "199.103.122.0/24", - "199.242.32.0/20", - "199.242.48.0/21", - "202.89.224.0/20", - "204.13.120.0/21", - "204.14.180.0/22", - "204.79.135.0/24", - "204.79.179.0/24", - "204.79.181.0/24", - "204.79.188.0/24", - "204.79.195.0/24", - "204.79.196.0/23", - "204.79.252.0/24", - "204.152.18.0/23", - "204.152.140.0/23", - "204.231.192.0/24", - "204.231.194.0/23", - "204.231.197.0/24", - "204.231.198.0/23", - "204.231.200.0/21", - "204.231.208.0/20", - "204.231.236.0/24", - "205.174.224.0/20", - "206.138.168.0/21", - "206.191.224.0/19", - "207.46.0.0/16", - "207.68.128.0/18", - "208.68.136.0/21", - "208.76.44.0/22", - "208.84.0.0/21", - "209.240.192.0/19", - "213.199.128.0/18", - "216.32.180.0/22", - "216.220.208.0/20" - ], - "windows": [ - "4.128.0.0/12", - "4.144.0.0/12", - "4.160.0.0/12", - "4.176.0.0/12", - "4.192.0.0/12", - "4.208.0.0/12", - "4.224.0.0/12", - "4.240.0.0/12", - "13.64.0.0/11", - "13.96.0.0/13", - "13.104.0.0/14", - "20.0.0.0/11", - "20.33.0.0/16", - "20.34.0.0/15", - "20.36.0.0/14", - "20.40.0.0/13", - "20.48.0.0/12", - "20.64.0.0/10", - "20.128.0.0/16", - "20.130.0.0/16", - "20.135.0.0/16", - "20.136.0.0/16", - "20.140.0.0/15", - "20.143.0.0/16", - "20.144.0.0/14", - "20.150.0.0/15", - "20.152.0.0/16", - "20.153.0.0/16", - "20.157.0.0/16", - "20.158.0.0/15", - "20.160.0.0/12", - "20.176.0.0/14", - "20.180.0.0/14", - "20.184.0.0/13", - "20.192.0.0/10", - "23.96.0.0/13", - "40.64.0.0/10", - "40.162.0.0/16", - "42.159.0.0/16", - "51.4.0.0/15", - "51.8.0.0/16", - "51.10.0.0/15", - "51.12.0.0/15", - "51.18.0.0/16", - "51.51.0.0/16", - "51.53.0.0/16", - "51.103.0.0/16", - "51.104.0.0/15", - "51.107.0.0/16", - "51.116.0.0/16", - "51.120.0.0/16", - "51.124.0.0/16", - "51.132.0.0/16", - "51.136.0.0/15", - "51.138.0.0/16", - "51.140.0.0/14", - "51.144.0.0/15", - "52.96.0.0/12", - "52.112.0.0/14", - "52.120.0.0/14", - "52.125.0.0/16", - "52.126.0.0/15", - "52.130.0.0/15", - "52.132.0.0/14", - "52.136.0.0/13", - "52.145.0.0/16", - "52.146.0.0/15", - "52.148.0.0/14", - "52.152.0.0/13", - "52.160.0.0/11", - "52.224.0.0/11", - "64.4.0.0/18", - "65.52.0.0/14", - "66.119.144.0/20", - "68.18.0.0/15", - "68.154.0.0/15", - "68.210.0.0/15", - "68.218.0.0/15", - "68.220.0.0/15", - "70.37.0.0/17", - "70.37.128.0/18", - "70.152.0.0/15", - "70.156.0.0/15", - "72.144.0.0/14", - "72.152.0.0/14", - "74.160.0.0/14", - "74.176.0.0/14", - "74.224.0.0/14", - "74.234.0.0/15", - "74.240.0.0/14", - "74.248.0.0/15", - "91.190.216.0/21", - "94.245.64.0/18", - "98.64.0.0/14", - "98.70.0.0/15", - "102.37.0.0/16", - "102.133.0.0/16", - "103.9.8.0/22", - "103.25.156.0/24", - "103.25.157.0/24", - "103.25.158.0/23", - 
"103.36.96.0/22", - "103.255.140.0/22", - "104.40.0.0/13", - "104.146.0.0/15", - "104.208.0.0/13", - "108.140.0.0/14", - "111.221.16.0/20", - "111.221.64.0/18", - "128.94.0.0/16", - "129.75.0.0/16", - "131.107.0.0/16", - "131.253.1.0/24", - "131.253.3.0/24", - "131.253.5.0/24", - "131.253.6.0/24", - "131.253.8.0/24", - "131.253.12.0/22", - "131.253.16.0/23", - "131.253.18.0/24", - "131.253.21.0/24", - "131.253.22.0/23", - "131.253.24.0/21", - "131.253.32.0/20", - "131.253.61.0/24", - "131.253.62.0/23", - "131.253.64.0/18", - "131.253.128.0/17", - "132.164.0.0/16", - "132.245.0.0/16", - "134.170.0.0/16", - "134.177.0.0/16", - "135.130.0.0/16", - "135.149.0.0/16", - "137.116.0.0/15", - "137.135.0.0/16", - "138.91.0.0/16", - "138.105.0.0/16", - "138.196.0.0/16", - "138.239.0.0/16", - "139.217.0.0/16", - "139.219.0.0/16", - "141.251.0.0/16", - "143.64.0.0/16", - "146.147.0.0/16", - "147.145.0.0/16", - "147.243.0.0/16", - "148.7.0.0/16", - "150.171.0.0/16", - "150.242.48.0/22", - "155.62.0.0/16", - "157.31.0.0/16", - "157.54.0.0/15", - "157.56.0.0/14", - "157.60.0.0/16", - "158.23.0.0/16", - "158.158.0.0/16", - "159.27.0.0/16", - "159.128.0.0/16", - "163.228.0.0/16", - "167.105.0.0/16", - "167.220.0.0/16", - "168.61.0.0/16", - "168.62.0.0/15", - "169.138.0.0/16", - "170.165.0.0/16", - "172.160.0.0/11", - "172.200.0.0/13", - "172.208.0.0/13", - "191.232.0.0/13", - "192.32.0.0/16", - "192.48.225.0/24", - "192.84.159.0/24", - "192.84.160.0/23", - "192.197.157.0/24", - "192.237.67.0/24", - "193.149.64.0/19", - "193.221.113.0/24", - "194.69.96.0/19", - "194.110.197.0/24", - "195.134.224.0/19", - "198.105.232.0/22", - "198.137.97.0/24", - "198.180.95.0/24", - "198.180.96.0/23", - "198.200.130.0/24", - "198.206.164.0/24", - "199.30.16.0/20", - "199.60.28.0/24", - "199.74.210.0/24", - "199.103.90.0/23", - "199.103.122.0/24", - "199.242.32.0/20", - "199.242.48.0/21", - "202.89.224.0/20", - "204.13.120.0/21", - "204.14.180.0/22", - "204.79.135.0/24", - "204.79.179.0/24", - "204.79.181.0/24", - "204.79.188.0/24", - "204.79.195.0/24", - "204.79.196.0/23", - "204.79.252.0/24", - "204.152.18.0/23", - "204.152.140.0/23", - "204.231.192.0/24", - "204.231.194.0/23", - "204.231.197.0/24", - "204.231.198.0/23", - "204.231.200.0/21", - "204.231.208.0/20", - "204.231.236.0/24", - "205.174.224.0/20", - "206.138.168.0/21", - "206.191.224.0/19", - "207.46.0.0/16", - "207.68.128.0/18", - "208.68.136.0/21", - "208.76.44.0/22", - "208.84.0.0/21", - "209.240.192.0/19", - "213.199.128.0/18", - "216.32.180.0/22", - "216.220.208.0/20" - ] - }, - "linux": { - "adminPasswordSecretName": "shm-blue-vm-admin-password-linux-update-server", - "disks": { - "os": { - "sizeGb": "64", - "type": "Standard_LRS" - } - }, - "hostname": "LINUX-UPDATES-SHM-BLUE", - "ip": "10.0.3.4", - "vmName": "LINUX-UPDATES-SHM-BLUE", - "vmSize": "Standard_B2ms" - }, - "schedule": { - "daily_definition_updates": { - "hour": "01", - "minute": "01" - }, - "weekly_system_updates": { - "day": "Tuesday", - "hour": "02", - "minute": "02" - } - } - } - }, - "name": "Turing Data Safe Haven (Blue)", - "network": { - "vnet": { - "cidr": "10.0.0.0/21", - "name": "VNET_SHM_BLUE", - "rg": "RG_SHM_BLUE_NETWORKING", - "subnets": { - "firewall": { - "cidr": "10.0.2.0/24", - "name": "AzureFirewallSubnet" - }, - "gateway": { - "cidr": "10.0.7.0/24", - "name": "GatewaySubnet" - }, - "identity": { - "cidr": "10.0.0.0/24", - "name": "IdentitySubnet", - "nsg": { - "name": "NSG_SHM_BLUE_IDENTITY", - "rules": "shm-nsg-rules-identity.json" - } - }, - "monitoring": { - "cidr": 
"10.0.1.0/24", - "name": "MonitoringSubnet", - "nsg": { - "name": "NSG_SHM_BLUE_MONITORING", - "rules": "shm-nsg-rules-monitoring.json" - } - }, - "updateServers": { - "cidr": "10.0.3.0/24", - "name": "UpdateServersSubnet", - "nsg": { - "name": "NSG_SHM_BLUE_UPDATE_SERVERS", - "rules": "shm-nsg-rules-update-servers.json" - } - } - } - }, - "vnetRepositoriesTier2": { - "cidr": "10.10.2.0/24", - "name": "VNET_SHM_BLUE_PACKAGE_REPOSITORIES_TIER_2", - "rg": "RG_SHM_BLUE_NETWORKING", - "subnets": { - "deployment": { - "cidr": "10.10.2.0/26", - "name": "RepositoryDeploymentSubnet", - "nsg": { - "name": "NSG_SHM_BLUE_REPOSITORY_DEPLOYMENT_TIER_2", - "rules": "shm-nsg-rules-repository-deployment-tier2.json" - } - }, - "mirrorsExternal": { - "cidr": "10.10.2.64/26", - "name": "RepositoryMirrorsExternalTier2Subnet", - "nsg": { - "name": "NSG_SHM_BLUE_REPOSITORY_MIRRORS_EXTERNAL_TIER2", - "rules": "shm-nsg-rules-repository-mirrors-external-tier2.json" - } - }, - "mirrorsInternal": { - "cidr": "10.10.2.128/26", - "name": "RepositoryMirrorsInternalTier2Subnet", - "nsg": { - "name": "NSG_SHM_BLUE_REPOSITORY_MIRRORS_INTERNAL_TIER2", - "rules": "shm-nsg-rules-repository-mirrors-internal-tier2.json" - } - }, - "proxies": { - "cidr": "10.10.2.192/26", - "name": "RepositoryProxiesTier2Subnet", - "nsg": { - "name": "NSG_SHM_BLUE_REPOSITORY_PROXIES_TIER_2", - "rules": "shm-nsg-rules-repository-proxies-tier2.json" - } - } - } - }, - "vnetRepositoriesTier3": { - "cidr": "10.10.3.0/24", - "name": "VNET_SHM_BLUE_PACKAGE_REPOSITORIES_TIER_3", - "rg": "RG_SHM_BLUE_NETWORKING", - "subnets": { - "deployment": { - "cidr": "10.10.3.0/26", - "name": "RepositoryDeploymentSubnet", - "nsg": { - "name": "NSG_SHM_BLUE_REPOSITORY_DEPLOYMENT_TIER_3", - "rules": "shm-nsg-rules-repository-deployment-tier3.json" - } - }, - "mirrorsExternal": { - "cidr": "10.10.3.64/26", - "name": "RepositoryMirrorsExternalTier3Subnet", - "nsg": { - "name": "NSG_SHM_BLUE_REPOSITORY_MIRRORS_EXTERNAL_TIER3", - "rules": "shm-nsg-rules-repository-mirrors-external-tier3.json" - } - }, - "mirrorsInternal": { - "cidr": "10.10.3.128/26", - "name": "RepositoryMirrorsInternalTier3Subnet", - "nsg": { - "name": "NSG_SHM_BLUE_REPOSITORY_MIRRORS_INTERNAL_TIER3", - "rules": "shm-nsg-rules-repository-mirrors-internal-tier3.json" - } - }, - "proxies": { - "cidr": "10.10.3.192/26", - "name": "RepositoryProxiesTier3Subnet", - "nsg": { - "name": "NSG_SHM_BLUE_REPOSITORY_PROXIES_TIER_3", - "rules": "shm-nsg-rules-repository-proxies-tier3.json" - } - } - } - }, - "vpn": { - "cidr": "172.16.201.0/24" - } - }, - "nps": { - "adminPasswordSecretName": "shm-blue-vm-admin-password-nps", - "disks": { - "os": { - "sizeGb": "128", - "type": "Standard_LRS" - } - }, - "hostname": "NPS-SHM-BLUE", - "installationDirectory": "C:\\Installation", - "ip": "10.0.0.6", - "rg": "RG_SHM_BLUE_NPS", - "vmName": "NPS-SHM-BLUE", - "vmSize": "Standard_D2s_v3" - }, - "nsgPrefix": "NSG_SHM_BLUE", - "organisation": { - "contactEmail": "safehavendevs@turing.ac.uk", - "countryCode": "GB", - "name": "The Alan Turing Institute", - "stateCountyRegion": "London", - "townCity": "London" - }, - "repositories": { - "rg": "RG_SHM_BLUE_PACKAGE_REPOSITORIES", - "tier2": { - "proxies": { - "many": { - "adminPasswordSecretName": "shm-blue-vm-admin-password-many-repository-proxy-tier-2", - "applicationAdminPasswordSecretName": "shm-blue-application-admin-password-many-repository-proxy-tier-2", - "disks": { - "os": { - "sizeGb": 32, - "type": "Standard_LRS" - } - }, - "ipAddress": "10.10.2.198", - "vmName": 
"SHM-BLUE-MANY-REPOSITORY-PROXY-TIER-2", - "vmSize": "Standard_B2ms" - } - } - }, - "tier3": { - "proxies": { - "many": { - "adminPasswordSecretName": "shm-blue-vm-admin-password-many-repository-proxy-tier-3", - "applicationAdminPasswordSecretName": "shm-blue-application-admin-password-many-repository-proxy-tier-3", - "disks": { - "os": { - "sizeGb": 32, - "type": "Standard_LRS" - } - }, - "ipAddress": "10.10.3.198", - "vmName": "SHM-BLUE-MANY-REPOSITORY-PROXY-TIER-3", - "vmSize": "Standard_B2ms" - } - } - } - }, - "rgPrefix": "RG_SHM_BLUE", - "srdImage": { - "bootdiagnostics": { - "accountName": "vmimagesbootdiagcmtvwnes", - "rg": "RG_VMIMAGES_BOOT_DIAGNOSTICS" - }, - "build": { - "nsg": { - "allowedIpAddresses": [ - "193.60.220.240", - "193.60.220.253" - ], - "name": "NSG_VMIMAGES_BUILD_CANDIDATES", - "rules": "vmimages-nsg-rules-build-candidates.json" - }, - "rg": "RG_VMIMAGES_BUILD_CANDIDATES", - "subnet": { - "cidr": "10.48.0.0/24", - "name": "BuildCandidatesSubnet" - }, - "vm": { - "diskSizeGb": 128, - "diskType": "Standard_LRS", - "size": "Standard_F8s_v2" - }, - "vnet": { - "cidr": "10.48.0.0/16", - "name": "VNET_VMIMAGES" - } - }, - "gallery": { - "name": "DATA_SAFE_HAVEN_SHARED_IMAGES", - "rg": "RG_VMIMAGES_GALLERY" - }, - "images": { - "rg": "RG_VMIMAGES_STORAGE" - }, - "keyVault": { - "name": "kv-shm-blue-images", - "rg": "RG_VMIMAGES_SECRETS" - }, - "location": "uksouth", - "network": { - "rg": "RG_VMIMAGES_NETWORKING" - }, - "subscription": "[Prod] Safe Haven VM Images" - }, - "storage": { - "artifacts": { - "accountName": "shmblueartifactsanoblwvs", - "containers": { - "shmArtifactsDC": "shm-artifacts-dc", - "shmArtifactsNPS": "shm-artifacts-nps", - "shmDesiredState": "shm-desired-state" - }, - "rg": "RG_SHM_BLUE_STORAGE" - }, - "bootdiagnostics": { - "accountName": "shmbluebootdiagsanoblwvs", - "rg": "RG_SHM_BLUE_STORAGE" - }, - "persistentdata": { - "rg": "RG_SHM_BLUE_PERSISTENT_DATA" - } - }, - "storageTypeDefault": "Standard_GRS", - "subscriptionName": "Data Safe Haven Development", - "time": { - "ntp": { - "serverAddresses": [ - "216.239.35.0", - "216.239.35.4", - "216.239.35.8", - "216.239.35.12" - ], - "serverFqdns": [ - "time.google.com", - "time1.google.com", - "time2.google.com", - "time3.google.com", - "time4.google.com" - ] - }, - "timezone": { - "linux": "Europe/London", - "windows": "GMT Standard Time" - } - }, - "users": { - "computerManagers": { - "databaseServers": { - "name": "BLUE Database Servers Manager", - "passwordSecretName": "shm-blue-computer-manager-password-database-servers", - "samAccountName": "bluedatabasesrvrs" - }, - "identityServers": { - "name": "BLUE Identity Servers Manager", - "passwordSecretName": "shm-blue-computer-manager-password-identity-servers", - "samAccountName": "blueidentitysrvrs" - }, - "linuxServers": { - "name": "BLUE Linux Servers Manager", - "passwordSecretName": "shm-blue-computer-manager-password-linux-servers", - "samAccountName": "bluelinuxsrvrs" - } - }, - "serviceAccounts": { - "aadLocalSync": { - "name": "BLUE Local AD Sync Administrator", - "passwordSecretName": "shm-blue-aad-localsync-password", - "samAccountName": "bluelocaladsync", - "usernameSecretName": "shm-blue-aad-localsync-username" - } - } - }, - "vmImagesRgPrefix": "RG_VMIMAGES" -} diff --git a/tests/resources/shm_green_full_config.json b/tests/resources/shm_green_full_config.json deleted file mode 100644 index 9c099cd9e3..0000000000 --- a/tests/resources/shm_green_full_config.json +++ /dev/null @@ -1,1105 +0,0 @@ -{ - "azureAdminGroupName": "Safe Haven 
Test Admins", - "azureAdTenantId": "cb94a6f6-ef7a-42ab-bcad-4f0b887cfd3e", - "dc": { - "adDirectory": "C:\\ActiveDirectory", - "disks": { - "os": { - "sizeGb": "128", - "type": "Standard_LRS" - } - }, - "external_dns_resolver": "168.63.129.16", - "fqdn": "dc1-shm-green.green.develop.turingsafehaven.ac.uk", - "hostname": "DC1-SHM-GREEN", - "hostnameLower": "dc1-shm-green", - "hostnameUpper": "DC1-SHM-GREEN", - "installationDirectory": "C:\\Installation", - "ip": "10.0.0.4", - "rg": "RG_SHM_GREEN_DC", - "safemodePasswordSecretName": "shm-green-vm-safemode-password-dc", - "vmName": "DC1-SHM-GREEN", - "vmSize": "Standard_D2s_v3" - }, - "dcb": { - "fqdn": "dc2-shm-green.green.develop.turingsafehaven.ac.uk", - "hostname": "DC2-SHM-GREEN", - "ip": "10.0.0.5", - "vmName": "DC2-SHM-GREEN" - }, - "diskTypeDefault": "Standard_LRS", - "dns": { - "rg": "RG_SHM_DNS_TEST", - "subscriptionName": "[Prod] Safe Haven Domains" - }, - "dockerAccount": "youraccountname", - "dockerPassword": "dckr_pat_0o0o0o0o0o0o0o0o0o0o0o0o0o0", - "domain": { - "dn": "DC=green,DC=develop,DC=turingsafehaven,DC=ac,DC=uk", - "fqdn": "green.develop.turingsafehaven.ac.uk", - "fqdnLower": "green.develop.turingsafehaven.ac.uk", - "fqdnUpper": "GREEN.DEVELOP.TURINGSAFEHAVEN.AC.UK", - "netbiosName": "GREEN", - "ous": { - "databaseServers": { - "name": "Secure Research Environment Database Servers", - "path": "OU=Secure Research Environment Database Servers,DC=green,DC=develop,DC=turingsafehaven,DC=ac,DC=uk" - }, - "identityServers": { - "name": "Safe Haven Identity Servers", - "path": "OU=Safe Haven Identity Servers,DC=green,DC=develop,DC=turingsafehaven,DC=ac,DC=uk" - }, - "linuxServers": { - "name": "Secure Research Environment Linux Servers", - "path": "OU=Secure Research Environment Linux Servers,DC=green,DC=develop,DC=turingsafehaven,DC=ac,DC=uk" - }, - "researchUsers": { - "name": "Safe Haven Research Users", - "path": "OU=Safe Haven Research Users,DC=green,DC=develop,DC=turingsafehaven,DC=ac,DC=uk" - }, - "securityGroups": { - "name": "Safe Haven Security Groups", - "path": "OU=Safe Haven Security Groups,DC=green,DC=develop,DC=turingsafehaven,DC=ac,DC=uk" - }, - "serviceAccounts": { - "name": "Safe Haven Service Accounts", - "path": "OU=Safe Haven Service Accounts,DC=green,DC=develop,DC=turingsafehaven,DC=ac,DC=uk" - } - }, - "securityGroups": { - "computerManagers": { - "description": "SG Safe Haven Computer Management Users", - "name": "SG Safe Haven Computer Management Users" - }, - "serverAdmins": { - "description": "SG Safe Haven Server Administrators", - "name": "SG Safe Haven Server Administrators" - } - } - }, - "firewall": { - "name": "FIREWALL-SHM-GREEN", - "routeTableName": "ROUTE-TABLE-SHM-GREEN" - }, - "id": "green", - "keyVault": { - "name": "kv-shm-green", - "rg": "RG_SHM_GREEN_SECRETS", - "secretNames": { - "aadEmergencyAdminPassword": "shm-green-aad-emergency-admin-password", - "aadEmergencyAdminUsername": "shm-green-aad-emergency-admin-username", - "buildImageAdminPassword": "shm-green-buildimage-admin-password", - "buildImageAdminUsername": "shm-green-buildimage-admin-username", - "domainAdminPassword": "shm-green-domain-admin-password", - "domainAdminUsername": "shm-green-domain-admin-username", - "vmAdminUsername": "shm-green-vm-admin-username", - "vpnCaCertificate": "shm-green-vpn-ca-cert", - "vpnCaCertificatePlain": "shm-green-vpn-ca-cert-plain", - "vpnCaCertPassword": "shm-green-vpn-ca-cert-password", - "vpnClientCertificate": "shm-green-vpn-client-cert", - "vpnClientCertPassword": 
"shm-green-vpn-client-cert-password" - } - }, - "location": "uksouth", - "monitoring": { - "automationAccount": { - "name": "shm-green-automation" - }, - "loggingWorkspace": { - "name": "shm-green-loganalytics" - }, - "privatelink": { - "name": "shm-green-privatelinkscope" - }, - "rg": "RG_SHM_GREEN_MONITORING", - "updateServers": { - "externalIpAddresses": { - "linux": [ - "72.32.157.246", - "87.238.57.227", - "147.75.85.69", - "217.196.149.55", - "91.189.88.0/21", - "185.125.188.0/22", - "103.21.244.0/22", - "103.22.200.0/22", - "103.31.4.0/22", - "104.16.0.0/13", - "104.24.0.0/14", - "108.162.192.0/18", - "131.0.72.0/22", - "141.101.64.0/18", - "162.158.0.0/15", - "172.64.0.0/13", - "173.245.48.0/20", - "188.114.96.0/20", - "190.93.240.0/20", - "197.234.240.0/22", - "198.41.128.0/17", - "3.10.17.128/25", - "3.101.158.0/23", - "3.11.53.0/24", - "3.128.93.0/24", - "3.134.215.0/24", - "3.231.2.0/25", - "3.234.232.224/27", - "3.236.169.192/26", - "3.236.48.0/23", - "3.35.130.128/25", - "13.113.196.64/26", - "13.113.203.0/24", - "13.124.199.0/24", - "13.210.67.128/26", - "13.224.0.0/14", - "13.228.69.0/24", - "13.233.177.192/26", - "13.249.0.0/16", - "13.32.0.0/15", - "13.35.0.0/16", - "13.48.32.0/24", - "13.54.63.128/26", - "13.59.250.0/26", - "15.158.0.0/16", - "15.188.184.0/24", - "15.207.13.128/25", - "15.207.213.128/25", - "18.154.0.0/15", - "18.160.0.0/15", - "18.164.0.0/15", - "18.172.0.0/15", - "18.192.142.0/23", - "18.200.212.0/23", - "18.216.170.128/25", - "18.229.220.192/26", - "18.238.0.0/15", - "18.244.0.0/15", - "18.64.0.0/14", - "18.68.0.0/16", - "34.195.252.0/24", - "34.216.51.0/25", - "34.223.12.224/27", - "34.223.80.192/26", - "34.226.14.0/24", - "35.158.136.0/24", - "35.162.63.192/26", - "35.167.191.128/26", - "36.103.232.0/25", - "36.103.232.128/26", - "44.227.178.0/24", - "44.234.108.128/25", - "44.234.90.252/30", - "52.124.128.0/17", - "52.15.127.128/26", - "52.199.127.192/26", - "52.212.248.0/26", - "52.220.191.0/26", - "52.222.128.0/17", - "52.46.0.0/18", - "52.47.139.0/24", - "52.52.191.128/26", - "52.56.127.0/25", - "52.57.254.0/24", - "52.66.194.128/26", - "52.78.247.128/26", - "52.82.128.0/19", - "52.84.0.0/15", - "54.182.0.0/16", - "54.192.0.0/16", - "54.230.0.0/17", - "54.230.128.0/18", - "54.230.200.0/21", - "54.230.208.0/20", - "54.230.224.0/19", - "54.233.255.128/26", - "54.239.128.0/18", - "54.239.192.0/19", - "54.240.128.0/18", - "58.254.138.0/25", - "58.254.138.128/26", - "64.252.128.0/18", - "64.252.64.0/18", - "65.8.0.0/16", - "65.9.0.0/17", - "65.9.128.0/18", - "70.132.0.0/18", - "71.152.0.0/17", - "99.79.169.0/24", - "99.84.0.0/16", - "99.86.0.0/16", - "108.138.0.0/15", - "108.156.0.0/14", - "116.129.226.0/25", - "116.129.226.128/26", - "118.193.97.128/25", - "118.193.97.64/26", - "119.147.182.0/25", - "119.147.182.128/26", - "120.232.236.0/25", - "120.232.236.128/26", - "120.253.240.192/26", - "120.253.241.160/27", - "120.253.245.128/26", - "120.253.245.192/27", - "120.52.12.64/26", - "120.52.153.192/26", - "120.52.22.96/27", - "120.52.39.128/27", - "130.176.0.0/17", - "130.176.128.0/18", - "130.176.192.0/19", - "130.176.224.0/20", - "143.204.0.0/16", - "144.220.0.0/16", - "180.163.57.0/25", - "180.163.57.128/26", - "204.246.164.0/22", - "204.246.168.0/22", - "204.246.172.0/24", - "204.246.173.0/24", - "204.246.174.0/23", - "204.246.176.0/20", - "205.251.200.0/21", - "205.251.208.0/20", - "205.251.249.0/24", - "205.251.250.0/23", - "205.251.252.0/23", - "205.251.254.0/24", - "216.137.32.0/19", - "223.71.11.0/27", - "223.71.71.128/25", - 
"223.71.71.96/27", - "104.131.190.124", - "152.199.20.126", - "4.128.0.0/12", - "4.144.0.0/12", - "4.160.0.0/12", - "4.176.0.0/12", - "4.192.0.0/12", - "4.208.0.0/12", - "4.224.0.0/12", - "4.240.0.0/12", - "13.64.0.0/11", - "13.96.0.0/13", - "13.104.0.0/14", - "20.0.0.0/11", - "20.33.0.0/16", - "20.34.0.0/15", - "20.36.0.0/14", - "20.40.0.0/13", - "20.48.0.0/12", - "20.64.0.0/10", - "20.128.0.0/16", - "20.130.0.0/16", - "20.135.0.0/16", - "20.136.0.0/16", - "20.140.0.0/15", - "20.143.0.0/16", - "20.144.0.0/14", - "20.150.0.0/15", - "20.152.0.0/16", - "20.153.0.0/16", - "20.157.0.0/16", - "20.158.0.0/15", - "20.160.0.0/12", - "20.176.0.0/14", - "20.180.0.0/14", - "20.184.0.0/13", - "20.192.0.0/10", - "23.96.0.0/13", - "40.64.0.0/10", - "40.162.0.0/16", - "42.159.0.0/16", - "51.4.0.0/15", - "51.8.0.0/16", - "51.10.0.0/15", - "51.12.0.0/15", - "51.18.0.0/16", - "51.51.0.0/16", - "51.53.0.0/16", - "51.103.0.0/16", - "51.104.0.0/15", - "51.107.0.0/16", - "51.116.0.0/16", - "51.120.0.0/16", - "51.124.0.0/16", - "51.132.0.0/16", - "51.136.0.0/15", - "51.138.0.0/16", - "51.140.0.0/14", - "51.144.0.0/15", - "52.96.0.0/12", - "52.112.0.0/14", - "52.120.0.0/14", - "52.125.0.0/16", - "52.126.0.0/15", - "52.130.0.0/15", - "52.132.0.0/14", - "52.136.0.0/13", - "52.145.0.0/16", - "52.146.0.0/15", - "52.148.0.0/14", - "52.152.0.0/13", - "52.160.0.0/11", - "52.224.0.0/11", - "64.4.0.0/18", - "65.52.0.0/14", - "66.119.144.0/20", - "68.18.0.0/15", - "68.154.0.0/15", - "68.210.0.0/15", - "68.218.0.0/15", - "68.220.0.0/15", - "70.37.0.0/17", - "70.37.128.0/18", - "70.152.0.0/15", - "70.156.0.0/15", - "72.144.0.0/14", - "72.152.0.0/14", - "74.160.0.0/14", - "74.176.0.0/14", - "74.224.0.0/14", - "74.234.0.0/15", - "74.240.0.0/14", - "74.248.0.0/15", - "91.190.216.0/21", - "94.245.64.0/18", - "98.64.0.0/14", - "98.70.0.0/15", - "102.37.0.0/16", - "102.133.0.0/16", - "103.9.8.0/22", - "103.25.156.0/24", - "103.25.157.0/24", - "103.25.158.0/23", - "103.36.96.0/22", - "103.255.140.0/22", - "104.40.0.0/13", - "104.146.0.0/15", - "104.208.0.0/13", - "108.140.0.0/14", - "111.221.16.0/20", - "111.221.64.0/18", - "128.94.0.0/16", - "129.75.0.0/16", - "131.107.0.0/16", - "131.253.1.0/24", - "131.253.3.0/24", - "131.253.5.0/24", - "131.253.6.0/24", - "131.253.8.0/24", - "131.253.12.0/22", - "131.253.16.0/23", - "131.253.18.0/24", - "131.253.21.0/24", - "131.253.22.0/23", - "131.253.24.0/21", - "131.253.32.0/20", - "131.253.61.0/24", - "131.253.62.0/23", - "131.253.64.0/18", - "131.253.128.0/17", - "132.164.0.0/16", - "132.245.0.0/16", - "134.170.0.0/16", - "134.177.0.0/16", - "135.130.0.0/16", - "135.149.0.0/16", - "137.116.0.0/15", - "137.135.0.0/16", - "138.91.0.0/16", - "138.105.0.0/16", - "138.196.0.0/16", - "138.239.0.0/16", - "139.217.0.0/16", - "139.219.0.0/16", - "141.251.0.0/16", - "143.64.0.0/16", - "146.147.0.0/16", - "147.145.0.0/16", - "147.243.0.0/16", - "148.7.0.0/16", - "150.171.0.0/16", - "150.242.48.0/22", - "155.62.0.0/16", - "157.31.0.0/16", - "157.54.0.0/15", - "157.56.0.0/14", - "157.60.0.0/16", - "158.23.0.0/16", - "158.158.0.0/16", - "159.27.0.0/16", - "159.128.0.0/16", - "163.228.0.0/16", - "167.105.0.0/16", - "167.220.0.0/16", - "168.61.0.0/16", - "168.62.0.0/15", - "169.138.0.0/16", - "170.165.0.0/16", - "172.160.0.0/11", - "172.200.0.0/13", - "172.208.0.0/13", - "191.232.0.0/13", - "192.32.0.0/16", - "192.48.225.0/24", - "192.84.159.0/24", - "192.84.160.0/23", - "192.197.157.0/24", - "192.237.67.0/24", - "193.149.64.0/19", - "193.221.113.0/24", - "194.69.96.0/19", - "194.110.197.0/24", - 
"195.134.224.0/19", - "198.105.232.0/22", - "198.137.97.0/24", - "198.180.95.0/24", - "198.180.96.0/23", - "198.200.130.0/24", - "198.206.164.0/24", - "199.30.16.0/20", - "199.60.28.0/24", - "199.74.210.0/24", - "199.103.90.0/23", - "199.103.122.0/24", - "199.242.32.0/20", - "199.242.48.0/21", - "202.89.224.0/20", - "204.13.120.0/21", - "204.14.180.0/22", - "204.79.135.0/24", - "204.79.179.0/24", - "204.79.181.0/24", - "204.79.188.0/24", - "204.79.195.0/24", - "204.79.196.0/23", - "204.79.252.0/24", - "204.152.18.0/23", - "204.152.140.0/23", - "204.231.192.0/24", - "204.231.194.0/23", - "204.231.197.0/24", - "204.231.198.0/23", - "204.231.200.0/21", - "204.231.208.0/20", - "204.231.236.0/24", - "205.174.224.0/20", - "206.138.168.0/21", - "206.191.224.0/19", - "207.46.0.0/16", - "207.68.128.0/18", - "208.68.136.0/21", - "208.76.44.0/22", - "208.84.0.0/21", - "209.240.192.0/19", - "213.199.128.0/18", - "216.32.180.0/22", - "216.220.208.0/20" - ], - "windows": [ - "4.128.0.0/12", - "4.144.0.0/12", - "4.160.0.0/12", - "4.176.0.0/12", - "4.192.0.0/12", - "4.208.0.0/12", - "4.224.0.0/12", - "4.240.0.0/12", - "13.64.0.0/11", - "13.96.0.0/13", - "13.104.0.0/14", - "20.0.0.0/11", - "20.33.0.0/16", - "20.34.0.0/15", - "20.36.0.0/14", - "20.40.0.0/13", - "20.48.0.0/12", - "20.64.0.0/10", - "20.128.0.0/16", - "20.130.0.0/16", - "20.135.0.0/16", - "20.136.0.0/16", - "20.140.0.0/15", - "20.143.0.0/16", - "20.144.0.0/14", - "20.150.0.0/15", - "20.152.0.0/16", - "20.153.0.0/16", - "20.157.0.0/16", - "20.158.0.0/15", - "20.160.0.0/12", - "20.176.0.0/14", - "20.180.0.0/14", - "20.184.0.0/13", - "20.192.0.0/10", - "23.96.0.0/13", - "40.64.0.0/10", - "40.162.0.0/16", - "42.159.0.0/16", - "51.4.0.0/15", - "51.8.0.0/16", - "51.10.0.0/15", - "51.12.0.0/15", - "51.18.0.0/16", - "51.51.0.0/16", - "51.53.0.0/16", - "51.103.0.0/16", - "51.104.0.0/15", - "51.107.0.0/16", - "51.116.0.0/16", - "51.120.0.0/16", - "51.124.0.0/16", - "51.132.0.0/16", - "51.136.0.0/15", - "51.138.0.0/16", - "51.140.0.0/14", - "51.144.0.0/15", - "52.96.0.0/12", - "52.112.0.0/14", - "52.120.0.0/14", - "52.125.0.0/16", - "52.126.0.0/15", - "52.130.0.0/15", - "52.132.0.0/14", - "52.136.0.0/13", - "52.145.0.0/16", - "52.146.0.0/15", - "52.148.0.0/14", - "52.152.0.0/13", - "52.160.0.0/11", - "52.224.0.0/11", - "64.4.0.0/18", - "65.52.0.0/14", - "66.119.144.0/20", - "68.18.0.0/15", - "68.154.0.0/15", - "68.210.0.0/15", - "68.218.0.0/15", - "68.220.0.0/15", - "70.37.0.0/17", - "70.37.128.0/18", - "70.152.0.0/15", - "70.156.0.0/15", - "72.144.0.0/14", - "72.152.0.0/14", - "74.160.0.0/14", - "74.176.0.0/14", - "74.224.0.0/14", - "74.234.0.0/15", - "74.240.0.0/14", - "74.248.0.0/15", - "91.190.216.0/21", - "94.245.64.0/18", - "98.64.0.0/14", - "98.70.0.0/15", - "102.37.0.0/16", - "102.133.0.0/16", - "103.9.8.0/22", - "103.25.156.0/24", - "103.25.157.0/24", - "103.25.158.0/23", - "103.36.96.0/22", - "103.255.140.0/22", - "104.40.0.0/13", - "104.146.0.0/15", - "104.208.0.0/13", - "108.140.0.0/14", - "111.221.16.0/20", - "111.221.64.0/18", - "128.94.0.0/16", - "129.75.0.0/16", - "131.107.0.0/16", - "131.253.1.0/24", - "131.253.3.0/24", - "131.253.5.0/24", - "131.253.6.0/24", - "131.253.8.0/24", - "131.253.12.0/22", - "131.253.16.0/23", - "131.253.18.0/24", - "131.253.21.0/24", - "131.253.22.0/23", - "131.253.24.0/21", - "131.253.32.0/20", - "131.253.61.0/24", - "131.253.62.0/23", - "131.253.64.0/18", - "131.253.128.0/17", - "132.164.0.0/16", - "132.245.0.0/16", - "134.170.0.0/16", - "134.177.0.0/16", - "135.130.0.0/16", - "135.149.0.0/16", - 
"137.116.0.0/15", - "137.135.0.0/16", - "138.91.0.0/16", - "138.105.0.0/16", - "138.196.0.0/16", - "138.239.0.0/16", - "139.217.0.0/16", - "139.219.0.0/16", - "141.251.0.0/16", - "143.64.0.0/16", - "146.147.0.0/16", - "147.145.0.0/16", - "147.243.0.0/16", - "148.7.0.0/16", - "150.171.0.0/16", - "150.242.48.0/22", - "155.62.0.0/16", - "157.31.0.0/16", - "157.54.0.0/15", - "157.56.0.0/14", - "157.60.0.0/16", - "158.23.0.0/16", - "158.158.0.0/16", - "159.27.0.0/16", - "159.128.0.0/16", - "163.228.0.0/16", - "167.105.0.0/16", - "167.220.0.0/16", - "168.61.0.0/16", - "168.62.0.0/15", - "169.138.0.0/16", - "170.165.0.0/16", - "172.160.0.0/11", - "172.200.0.0/13", - "172.208.0.0/13", - "191.232.0.0/13", - "192.32.0.0/16", - "192.48.225.0/24", - "192.84.159.0/24", - "192.84.160.0/23", - "192.197.157.0/24", - "192.237.67.0/24", - "193.149.64.0/19", - "193.221.113.0/24", - "194.69.96.0/19", - "194.110.197.0/24", - "195.134.224.0/19", - "198.105.232.0/22", - "198.137.97.0/24", - "198.180.95.0/24", - "198.180.96.0/23", - "198.200.130.0/24", - "198.206.164.0/24", - "199.30.16.0/20", - "199.60.28.0/24", - "199.74.210.0/24", - "199.103.90.0/23", - "199.103.122.0/24", - "199.242.32.0/20", - "199.242.48.0/21", - "202.89.224.0/20", - "204.13.120.0/21", - "204.14.180.0/22", - "204.79.135.0/24", - "204.79.179.0/24", - "204.79.181.0/24", - "204.79.188.0/24", - "204.79.195.0/24", - "204.79.196.0/23", - "204.79.252.0/24", - "204.152.18.0/23", - "204.152.140.0/23", - "204.231.192.0/24", - "204.231.194.0/23", - "204.231.197.0/24", - "204.231.198.0/23", - "204.231.200.0/21", - "204.231.208.0/20", - "204.231.236.0/24", - "205.174.224.0/20", - "206.138.168.0/21", - "206.191.224.0/19", - "207.46.0.0/16", - "207.68.128.0/18", - "208.68.136.0/21", - "208.76.44.0/22", - "208.84.0.0/21", - "209.240.192.0/19", - "213.199.128.0/18", - "216.32.180.0/22", - "216.220.208.0/20" - ] - }, - "linux": { - "adminPasswordSecretName": "shm-green-vm-admin-password-linux-update-server", - "disks": { - "os": { - "sizeGb": "64", - "type": "Standard_LRS" - } - }, - "hostname": "LINUX-UPDATES-SHM-GREEN", - "ip": "10.0.3.4", - "vmName": "LINUX-UPDATES-SHM-GREEN", - "vmSize": "Standard_B2ms" - }, - "schedule": { - "daily_definition_updates": { - "hour": "01", - "minute": "01" - }, - "weekly_system_updates": { - "day": "Tuesday", - "hour": "02", - "minute": "02" - } - } - } - }, - "name": "Turing Data Safe Haven (Green)", - "network": { - "vnet": { - "cidr": "10.0.0.0/21", - "name": "VNET_SHM_GREEN", - "rg": "RG_SHM_GREEN_NETWORKING", - "subnets": { - "firewall": { - "cidr": "10.0.2.0/24", - "name": "AzureFirewallSubnet" - }, - "gateway": { - "cidr": "10.0.7.0/24", - "name": "GatewaySubnet" - }, - "identity": { - "cidr": "10.0.0.0/24", - "name": "IdentitySubnet", - "nsg": { - "name": "NSG_SHM_GREEN_IDENTITY", - "rules": "shm-nsg-rules-identity.json" - } - }, - "monitoring": { - "cidr": "10.0.1.0/24", - "name": "MonitoringSubnet", - "nsg": { - "name": "NSG_SHM_GREEN_MONITORING", - "rules": "shm-nsg-rules-monitoring.json" - } - }, - "updateServers": { - "cidr": "10.0.3.0/24", - "name": "UpdateServersSubnet", - "nsg": { - "name": "NSG_SHM_GREEN_UPDATE_SERVERS", - "rules": "shm-nsg-rules-update-servers.json" - } - } - } - }, - "vnetRepositoriesTier2": { - "cidr": "10.10.2.0/24", - "name": "VNET_SHM_GREEN_PACKAGE_REPOSITORIES_TIER_2", - "rg": "RG_SHM_GREEN_NETWORKING", - "subnets": { - "deployment": { - "cidr": "10.10.2.0/26", - "name": "RepositoryDeploymentSubnet", - "nsg": { - "name": "NSG_SHM_GREEN_REPOSITORY_DEPLOYMENT_TIER_2", - "rules": 
"shm-nsg-rules-repository-deployment-tier2.json" - } - }, - "mirrorsExternal": { - "cidr": "10.10.2.64/26", - "name": "RepositoryMirrorsExternalTier2Subnet", - "nsg": { - "name": "NSG_SHM_GREEN_REPOSITORY_MIRRORS_EXTERNAL_TIER2", - "rules": "shm-nsg-rules-repository-mirrors-external-tier2.json" - } - }, - "mirrorsInternal": { - "cidr": "10.10.2.128/26", - "name": "RepositoryMirrorsInternalTier2Subnet", - "nsg": { - "name": "NSG_SHM_GREEN_REPOSITORY_MIRRORS_INTERNAL_TIER2", - "rules": "shm-nsg-rules-repository-mirrors-internal-tier2.json" - } - }, - "proxies": { - "cidr": "10.10.2.192/26", - "name": "RepositoryProxiesTier2Subnet", - "nsg": { - "name": "NSG_SHM_GREEN_REPOSITORY_PROXIES_TIER_2", - "rules": "shm-nsg-rules-repository-proxies-tier2.json" - } - } - } - }, - "vnetRepositoriesTier3": { - "cidr": "10.10.3.0/24", - "name": "VNET_SHM_GREEN_PACKAGE_REPOSITORIES_TIER_3", - "rg": "RG_SHM_GREEN_NETWORKING", - "subnets": { - "deployment": { - "cidr": "10.10.3.0/26", - "name": "RepositoryDeploymentSubnet", - "nsg": { - "name": "NSG_SHM_GREEN_REPOSITORY_DEPLOYMENT_TIER_3", - "rules": "shm-nsg-rules-repository-deployment-tier3.json" - } - }, - "mirrorsExternal": { - "cidr": "10.10.3.64/26", - "name": "RepositoryMirrorsExternalTier3Subnet", - "nsg": { - "name": "NSG_SHM_GREEN_REPOSITORY_MIRRORS_EXTERNAL_TIER3", - "rules": "shm-nsg-rules-repository-mirrors-external-tier3.json" - } - }, - "mirrorsInternal": { - "cidr": "10.10.3.128/26", - "name": "RepositoryMirrorsInternalTier3Subnet", - "nsg": { - "name": "NSG_SHM_GREEN_REPOSITORY_MIRRORS_INTERNAL_TIER3", - "rules": "shm-nsg-rules-repository-mirrors-internal-tier3.json" - } - }, - "proxies": { - "cidr": "10.10.3.192/26", - "name": "RepositoryProxiesTier3Subnet", - "nsg": { - "name": "NSG_SHM_GREEN_REPOSITORY_PROXIES_TIER_3", - "rules": "shm-nsg-rules-repository-proxies-tier3.json" - } - } - } - }, - "vpn": { - "cidr": "172.16.201.0/24" - } - }, - "nps": { - "adminPasswordSecretName": "shm-green-vm-admin-password-nps", - "disks": { - "os": { - "sizeGb": "128", - "type": "Standard_LRS" - } - }, - "hostname": "NPS-SHM-GREEN", - "installationDirectory": "C:\\Installation", - "ip": "10.0.0.6", - "rg": "RG_SHM_GREEN_NPS", - "vmName": "NPS-SHM-GREEN", - "vmSize": "Standard_D2s_v3" - }, - "nsgPrefix": "NSG_SHM_GREEN", - "organisation": { - "contactEmail": "safehavendevs@turing.ac.uk", - "countryCode": "GB", - "name": "The Alan Turing Institute", - "stateCountyRegion": "London", - "townCity": "London" - }, - "repositories": { - "rg": "RG_SHM_GREEN_PACKAGE_REPOSITORIES", - "tier2": { - "proxies": { - "many": { - "adminPasswordSecretName": "shm-green-vm-admin-password-many-repository-proxy-tier-2", - "applicationAdminPasswordSecretName": "shm-green-application-admin-password-many-repository-proxy-tier-2", - "disks": { - "os": { - "sizeGb": 32, - "type": "Standard_LRS" - } - }, - "ipAddress": "10.10.2.198", - "vmName": "SHM-GREEN-MANY-REPOSITORY-PROXY-TIER-2", - "vmSize": "Standard_B2ms" - } - } - }, - "tier3": { - "mirrorsExternal": { - "cran": { - "adminPasswordSecretName": "shm-green-vm-admin-password-cran-repository-mirror-external-tier-3", - "disks": { - "data": { - "sizeGb": 32, - "type": "Standard_LRS" - }, - "os": { - "sizeGb": 32, - "type": "Standard_LRS" - } - }, - "ipAddress": "10.10.3.68", - "vmName": "SHM-GREEN-CRAN-REPOSITORY-MIRROR-EXTERNAL-TIER-3", - "vmSize": "Standard_B2ms" - }, - "pypi": { - "adminPasswordSecretName": "shm-green-vm-admin-password-pypi-repository-mirror-external-tier-3", - "disks": { - "data": { - "sizeGb": 1024, - 
"type": "Standard_LRS" - }, - "os": { - "sizeGb": 32, - "type": "Standard_LRS" - } - }, - "ipAddress": "10.10.3.69", - "vmName": "SHM-GREEN-PYPI-REPOSITORY-MIRROR-EXTERNAL-TIER-3", - "vmSize": "Standard_B2ms" - } - }, - "mirrorsInternal": { - "cran": { - "adminPasswordSecretName": "shm-green-vm-admin-password-cran-repository-mirror-internal-tier-3", - "disks": { - "data": { - "sizeGb": 32, - "type": "Standard_LRS" - }, - "os": { - "sizeGb": 32, - "type": "Standard_LRS" - } - }, - "ipAddress": "10.10.3.132", - "vmName": "SHM-GREEN-CRAN-REPOSITORY-MIRROR-INTERNAL-TIER-3", - "vmSize": "Standard_B2ms" - }, - "pypi": { - "adminPasswordSecretName": "shm-green-vm-admin-password-pypi-repository-mirror-internal-tier-3", - "disks": { - "data": { - "sizeGb": 1024, - "type": "Standard_LRS" - }, - "os": { - "sizeGb": 32, - "type": "Standard_LRS" - } - }, - "ipAddress": "10.10.3.133", - "vmName": "SHM-GREEN-PYPI-REPOSITORY-MIRROR-INTERNAL-TIER-3", - "vmSize": "Standard_B2ms" - } - } - } - }, - "rgPrefix": "RG_SHM_GREEN", - "srdImage": { - "bootdiagnostics": { - "accountName": "vmimagesbootdiagcmtvwnes", - "rg": "RG_VMIMAGES_BOOT_DIAGNOSTICS" - }, - "build": { - "nsg": { - "allowedIpAddresses": [ - "193.60.220.240", - "193.60.220.253" - ], - "name": "NSG_VMIMAGES_BUILD_CANDIDATES", - "rules": "vmimages-nsg-rules-build-candidates.json" - }, - "rg": "RG_VMIMAGES_BUILD_CANDIDATES", - "subnet": { - "cidr": "10.48.0.0/24", - "name": "BuildCandidatesSubnet" - }, - "vm": { - "diskSizeGb": 128, - "diskType": "Standard_LRS", - "size": "Standard_F8s_v2" - }, - "vnet": { - "cidr": "10.48.0.0/16", - "name": "VNET_VMIMAGES" - } - }, - "gallery": { - "name": "DATA_SAFE_HAVEN_SHARED_IMAGES", - "rg": "RG_VMIMAGES_GALLERY" - }, - "images": { - "rg": "RG_VMIMAGES_STORAGE" - }, - "keyVault": { - "name": "kv-shm-green-images", - "rg": "RG_VMIMAGES_SECRETS" - }, - "location": "uksouth", - "network": { - "rg": "RG_VMIMAGES_NETWORKING" - }, - "subscription": "[Prod] Safe Haven VM Images" - }, - "storage": { - "artifacts": { - "accountName": "shmgreenartifactsnrzabxp", - "containers": { - "shmArtifactsDC": "shm-artifacts-dc", - "shmArtifactsNPS": "shm-artifacts-nps", - "shmDesiredState": "shm-desired-state" - }, - "rg": "RG_SHM_GREEN_STORAGE" - }, - "bootdiagnostics": { - "accountName": "shmgreenbootdiagsnrzabxp", - "rg": "RG_SHM_GREEN_STORAGE" - }, - "persistentdata": { - "rg": "RG_SHM_GREEN_PERSISTENT_DATA" - } - }, - "storageTypeDefault": "Standard_GRS", - "subscriptionName": "Data Safe Haven Development", - "time": { - "ntp": { - "serverAddresses": [ - "216.239.35.0", - "216.239.35.4", - "216.239.35.8", - "216.239.35.12" - ], - "serverFqdns": [ - "time.google.com", - "time1.google.com", - "time2.google.com", - "time3.google.com", - "time4.google.com" - ] - }, - "timezone": { - "linux": "Europe/London", - "windows": "GMT Standard Time" - } - }, - "users": { - "computerManagers": { - "databaseServers": { - "name": "GREEN Database Servers Manager", - "passwordSecretName": "shm-green-computer-manager-password-database-servers", - "samAccountName": "greendatabasesrvrs" - }, - "identityServers": { - "name": "GREEN Identity Servers Manager", - "passwordSecretName": "shm-green-computer-manager-password-identity-servers", - "samAccountName": "greenidentitysrvrs" - }, - "linuxServers": { - "name": "GREEN Linux Servers Manager", - "passwordSecretName": "shm-green-computer-manager-password-linux-servers", - "samAccountName": "greenlinuxsrvrs" - } - }, - "serviceAccounts": { - "aadLocalSync": { - "name": "GREEN Local AD Sync 
Administrator", - "passwordSecretName": "shm-green-aad-localsync-password", - "samAccountName": "greenlocaladsync", - "usernameSecretName": "shm-green-aad-localsync-username" - } - } - }, - "vmImagesRgPrefix": "RG_VMIMAGES" -} diff --git a/tests/resources/sre_bluet1guac_full_config.json b/tests/resources/sre_bluet1guac_full_config.json deleted file mode 100644 index 7c0e22e826..0000000000 --- a/tests/resources/sre_bluet1guac_full_config.json +++ /dev/null @@ -1,1416 +0,0 @@ -{ - "shm": { - "azureAdminGroupName": "Safe Haven Test Admins", - "azureAdTenantId": "1d93a235-0c16-4f39-adff-a550719bdb49", - "dc": { - "adDirectory": "C:\\ActiveDirectory", - "disks": { - "os": { - "sizeGb": "128", - "type": "Standard_LRS" - } - }, - "external_dns_resolver": "168.63.129.16", - "fqdn": "dc1-shm-blue.blue.develop.turingsafehaven.ac.uk", - "hostname": "DC1-SHM-BLUE", - "hostnameLower": "dc1-shm-blue", - "hostnameUpper": "DC1-SHM-BLUE", - "installationDirectory": "C:\\Installation", - "ip": "10.0.0.4", - "rg": "RG_SHM_BLUE_DC", - "safemodePasswordSecretName": "shm-blue-vm-safemode-password-dc", - "vmName": "DC1-SHM-BLUE", - "vmSize": "Standard_D2s_v3" - }, - "dcb": { - "fqdn": "dc2-shm-blue.blue.develop.turingsafehaven.ac.uk", - "hostname": "DC2-SHM-BLUE", - "ip": "10.0.0.5", - "vmName": "DC2-SHM-BLUE" - }, - "diskTypeDefault": "Standard_LRS", - "dns": { - "rg": "RG_SHM_DNS_TEST", - "subscriptionName": "[Prod] Safe Haven Domains" - }, - "dockerAccount": "youraccountname", - "dockerPassword": "dckr_pat_0o0o0o0o0o0o0o0o0o0o0o0o0o0", - "domain": { - "dn": "DC=blue,DC=develop,DC=turingsafehaven,DC=ac,DC=uk", - "fqdn": "blue.develop.turingsafehaven.ac.uk", - "fqdnLower": "blue.develop.turingsafehaven.ac.uk", - "fqdnUpper": "BLUE.DEVELOP.TURINGSAFEHAVEN.AC.UK", - "netbiosName": "BLUE", - "ous": { - "databaseServers": { - "name": "Secure Research Environment Database Servers", - "path": "OU=Secure Research Environment Database Servers,DC=blue,DC=develop,DC=turingsafehaven,DC=ac,DC=uk" - }, - "identityServers": { - "name": "Safe Haven Identity Servers", - "path": "OU=Safe Haven Identity Servers,DC=blue,DC=develop,DC=turingsafehaven,DC=ac,DC=uk" - }, - "linuxServers": { - "name": "Secure Research Environment Linux Servers", - "path": "OU=Secure Research Environment Linux Servers,DC=blue,DC=develop,DC=turingsafehaven,DC=ac,DC=uk" - }, - "researchUsers": { - "name": "Safe Haven Research Users", - "path": "OU=Safe Haven Research Users,DC=blue,DC=develop,DC=turingsafehaven,DC=ac,DC=uk" - }, - "securityGroups": { - "name": "Safe Haven Security Groups", - "path": "OU=Safe Haven Security Groups,DC=blue,DC=develop,DC=turingsafehaven,DC=ac,DC=uk" - }, - "serviceAccounts": { - "name": "Safe Haven Service Accounts", - "path": "OU=Safe Haven Service Accounts,DC=blue,DC=develop,DC=turingsafehaven,DC=ac,DC=uk" - } - }, - "securityGroups": { - "computerManagers": { - "description": "SG Safe Haven Computer Management Users", - "name": "SG Safe Haven Computer Management Users" - }, - "serverAdmins": { - "description": "SG Safe Haven Server Administrators", - "name": "SG Safe Haven Server Administrators" - } - } - }, - "firewall": { - "name": "FIREWALL-SHM-BLUE", - "routeTableName": "ROUTE-TABLE-SHM-BLUE" - }, - "id": "blue", - "keyVault": { - "name": "kv-shm-blue", - "rg": "RG_SHM_BLUE_SECRETS", - "secretNames": { - "aadEmergencyAdminPassword": "shm-blue-aad-emergency-admin-password", - "aadEmergencyAdminUsername": "shm-blue-aad-emergency-admin-username", - "buildImageAdminPassword": "shm-blue-buildimage-admin-password", - 
"buildImageAdminUsername": "shm-blue-buildimage-admin-username", - "domainAdminPassword": "shm-blue-domain-admin-password", - "domainAdminUsername": "shm-blue-domain-admin-username", - "vmAdminUsername": "shm-blue-vm-admin-username", - "vpnCaCertificate": "shm-blue-vpn-ca-cert", - "vpnCaCertificatePlain": "shm-blue-vpn-ca-cert-plain", - "vpnCaCertPassword": "shm-blue-vpn-ca-cert-password", - "vpnClientCertificate": "shm-blue-vpn-client-cert", - "vpnClientCertPassword": "shm-blue-vpn-client-cert-password" - } - }, - "location": "uksouth", - "monitoring": { - "automationAccount": { - "name": "shm-blue-automation" - }, - "loggingWorkspace": { - "name": "shm-blue-loganalytics" - }, - "privatelink": { - "name": "shm-blue-privatelinkscope" - }, - "rg": "RG_SHM_BLUE_MONITORING", - "updateServers": { - "externalIpAddresses": { - "linux": [ - "72.32.157.246", - "87.238.57.227", - "147.75.85.69", - "217.196.149.55", - "91.189.88.0/21", - "185.125.188.0/22", - "103.21.244.0/22", - "103.22.200.0/22", - "103.31.4.0/22", - "104.16.0.0/13", - "104.24.0.0/14", - "108.162.192.0/18", - "131.0.72.0/22", - "141.101.64.0/18", - "162.158.0.0/15", - "172.64.0.0/13", - "173.245.48.0/20", - "188.114.96.0/20", - "190.93.240.0/20", - "197.234.240.0/22", - "198.41.128.0/17", - "3.10.17.128/25", - "3.101.158.0/23", - "3.11.53.0/24", - "3.128.93.0/24", - "3.134.215.0/24", - "3.231.2.0/25", - "3.234.232.224/27", - "3.236.169.192/26", - "3.236.48.0/23", - "3.35.130.128/25", - "13.113.196.64/26", - "13.113.203.0/24", - "13.124.199.0/24", - "13.210.67.128/26", - "13.224.0.0/14", - "13.228.69.0/24", - "13.233.177.192/26", - "13.249.0.0/16", - "13.32.0.0/15", - "13.35.0.0/16", - "13.48.32.0/24", - "13.54.63.128/26", - "13.59.250.0/26", - "15.158.0.0/16", - "15.188.184.0/24", - "15.207.13.128/25", - "15.207.213.128/25", - "18.154.0.0/15", - "18.160.0.0/15", - "18.164.0.0/15", - "18.172.0.0/15", - "18.192.142.0/23", - "18.200.212.0/23", - "18.216.170.128/25", - "18.229.220.192/26", - "18.238.0.0/15", - "18.244.0.0/15", - "18.64.0.0/14", - "18.68.0.0/16", - "34.195.252.0/24", - "34.216.51.0/25", - "34.223.12.224/27", - "34.223.80.192/26", - "34.226.14.0/24", - "35.158.136.0/24", - "35.162.63.192/26", - "35.167.191.128/26", - "36.103.232.0/25", - "36.103.232.128/26", - "44.227.178.0/24", - "44.234.108.128/25", - "44.234.90.252/30", - "52.124.128.0/17", - "52.15.127.128/26", - "52.199.127.192/26", - "52.212.248.0/26", - "52.220.191.0/26", - "52.222.128.0/17", - "52.46.0.0/18", - "52.47.139.0/24", - "52.52.191.128/26", - "52.56.127.0/25", - "52.57.254.0/24", - "52.66.194.128/26", - "52.78.247.128/26", - "52.82.128.0/19", - "52.84.0.0/15", - "54.182.0.0/16", - "54.192.0.0/16", - "54.230.0.0/17", - "54.230.128.0/18", - "54.230.200.0/21", - "54.230.208.0/20", - "54.230.224.0/19", - "54.233.255.128/26", - "54.239.128.0/18", - "54.239.192.0/19", - "54.240.128.0/18", - "58.254.138.0/25", - "58.254.138.128/26", - "64.252.128.0/18", - "64.252.64.0/18", - "65.8.0.0/16", - "65.9.0.0/17", - "65.9.128.0/18", - "70.132.0.0/18", - "71.152.0.0/17", - "99.79.169.0/24", - "99.84.0.0/16", - "99.86.0.0/16", - "108.138.0.0/15", - "108.156.0.0/14", - "116.129.226.0/25", - "116.129.226.128/26", - "118.193.97.128/25", - "118.193.97.64/26", - "119.147.182.0/25", - "119.147.182.128/26", - "120.232.236.0/25", - "120.232.236.128/26", - "120.253.240.192/26", - "120.253.241.160/27", - "120.253.245.128/26", - "120.253.245.192/27", - "120.52.12.64/26", - "120.52.153.192/26", - "120.52.22.96/27", - "120.52.39.128/27", - "130.176.0.0/17", - "130.176.128.0/18", - 
"130.176.192.0/19", - "130.176.224.0/20", - "143.204.0.0/16", - "144.220.0.0/16", - "180.163.57.0/25", - "180.163.57.128/26", - "204.246.164.0/22", - "204.246.168.0/22", - "204.246.172.0/24", - "204.246.173.0/24", - "204.246.174.0/23", - "204.246.176.0/20", - "205.251.200.0/21", - "205.251.208.0/20", - "205.251.249.0/24", - "205.251.250.0/23", - "205.251.252.0/23", - "205.251.254.0/24", - "216.137.32.0/19", - "223.71.11.0/27", - "223.71.71.128/25", - "223.71.71.96/27", - "104.131.190.124", - "152.199.20.126", - "4.128.0.0/12", - "4.144.0.0/12", - "4.160.0.0/12", - "4.176.0.0/12", - "4.192.0.0/12", - "4.208.0.0/12", - "4.224.0.0/12", - "4.240.0.0/12", - "13.64.0.0/11", - "13.96.0.0/13", - "13.104.0.0/14", - "20.0.0.0/11", - "20.33.0.0/16", - "20.34.0.0/15", - "20.36.0.0/14", - "20.40.0.0/13", - "20.48.0.0/12", - "20.64.0.0/10", - "20.128.0.0/16", - "20.130.0.0/16", - "20.135.0.0/16", - "20.136.0.0/16", - "20.140.0.0/15", - "20.143.0.0/16", - "20.144.0.0/14", - "20.150.0.0/15", - "20.152.0.0/16", - "20.153.0.0/16", - "20.157.0.0/16", - "20.158.0.0/15", - "20.160.0.0/12", - "20.176.0.0/14", - "20.180.0.0/14", - "20.184.0.0/13", - "20.192.0.0/10", - "23.96.0.0/13", - "40.64.0.0/10", - "40.162.0.0/16", - "42.159.0.0/16", - "51.4.0.0/15", - "51.8.0.0/16", - "51.10.0.0/15", - "51.12.0.0/15", - "51.18.0.0/16", - "51.51.0.0/16", - "51.53.0.0/16", - "51.103.0.0/16", - "51.104.0.0/15", - "51.107.0.0/16", - "51.116.0.0/16", - "51.120.0.0/16", - "51.124.0.0/16", - "51.132.0.0/16", - "51.136.0.0/15", - "51.138.0.0/16", - "51.140.0.0/14", - "51.144.0.0/15", - "52.96.0.0/12", - "52.112.0.0/14", - "52.120.0.0/14", - "52.125.0.0/16", - "52.126.0.0/15", - "52.130.0.0/15", - "52.132.0.0/14", - "52.136.0.0/13", - "52.145.0.0/16", - "52.146.0.0/15", - "52.148.0.0/14", - "52.152.0.0/13", - "52.160.0.0/11", - "52.224.0.0/11", - "64.4.0.0/18", - "65.52.0.0/14", - "66.119.144.0/20", - "68.18.0.0/15", - "68.154.0.0/15", - "68.210.0.0/15", - "68.218.0.0/15", - "68.220.0.0/15", - "70.37.0.0/17", - "70.37.128.0/18", - "70.152.0.0/15", - "70.156.0.0/15", - "72.144.0.0/14", - "72.152.0.0/14", - "74.160.0.0/14", - "74.176.0.0/14", - "74.224.0.0/14", - "74.234.0.0/15", - "74.240.0.0/14", - "74.248.0.0/15", - "91.190.216.0/21", - "94.245.64.0/18", - "98.64.0.0/14", - "98.70.0.0/15", - "102.37.0.0/16", - "102.133.0.0/16", - "103.9.8.0/22", - "103.25.156.0/24", - "103.25.157.0/24", - "103.25.158.0/23", - "103.36.96.0/22", - "103.255.140.0/22", - "104.40.0.0/13", - "104.146.0.0/15", - "104.208.0.0/13", - "108.140.0.0/14", - "111.221.16.0/20", - "111.221.64.0/18", - "128.94.0.0/16", - "129.75.0.0/16", - "131.107.0.0/16", - "131.253.1.0/24", - "131.253.3.0/24", - "131.253.5.0/24", - "131.253.6.0/24", - "131.253.8.0/24", - "131.253.12.0/22", - "131.253.16.0/23", - "131.253.18.0/24", - "131.253.21.0/24", - "131.253.22.0/23", - "131.253.24.0/21", - "131.253.32.0/20", - "131.253.61.0/24", - "131.253.62.0/23", - "131.253.64.0/18", - "131.253.128.0/17", - "132.164.0.0/16", - "132.245.0.0/16", - "134.170.0.0/16", - "134.177.0.0/16", - "135.130.0.0/16", - "135.149.0.0/16", - "137.116.0.0/15", - "137.135.0.0/16", - "138.91.0.0/16", - "138.105.0.0/16", - "138.196.0.0/16", - "138.239.0.0/16", - "139.217.0.0/16", - "139.219.0.0/16", - "141.251.0.0/16", - "143.64.0.0/16", - "146.147.0.0/16", - "147.145.0.0/16", - "147.243.0.0/16", - "148.7.0.0/16", - "150.171.0.0/16", - "150.242.48.0/22", - "155.62.0.0/16", - "157.31.0.0/16", - "157.54.0.0/15", - "157.56.0.0/14", - "157.60.0.0/16", - "158.23.0.0/16", - "158.158.0.0/16", - "159.27.0.0/16", - 
"159.128.0.0/16", - "163.228.0.0/16", - "167.105.0.0/16", - "167.220.0.0/16", - "168.61.0.0/16", - "168.62.0.0/15", - "169.138.0.0/16", - "170.165.0.0/16", - "172.160.0.0/11", - "172.200.0.0/13", - "172.208.0.0/13", - "191.232.0.0/13", - "192.32.0.0/16", - "192.48.225.0/24", - "192.84.159.0/24", - "192.84.160.0/23", - "192.197.157.0/24", - "192.237.67.0/24", - "193.149.64.0/19", - "193.221.113.0/24", - "194.69.96.0/19", - "194.110.197.0/24", - "195.134.224.0/19", - "198.105.232.0/22", - "198.137.97.0/24", - "198.180.95.0/24", - "198.180.96.0/23", - "198.200.130.0/24", - "198.206.164.0/24", - "199.30.16.0/20", - "199.60.28.0/24", - "199.74.210.0/24", - "199.103.90.0/23", - "199.103.122.0/24", - "199.242.32.0/20", - "199.242.48.0/21", - "202.89.224.0/20", - "204.13.120.0/21", - "204.14.180.0/22", - "204.79.135.0/24", - "204.79.179.0/24", - "204.79.181.0/24", - "204.79.188.0/24", - "204.79.195.0/24", - "204.79.196.0/23", - "204.79.252.0/24", - "204.152.18.0/23", - "204.152.140.0/23", - "204.231.192.0/24", - "204.231.194.0/23", - "204.231.197.0/24", - "204.231.198.0/23", - "204.231.200.0/21", - "204.231.208.0/20", - "204.231.236.0/24", - "205.174.224.0/20", - "206.138.168.0/21", - "206.191.224.0/19", - "207.46.0.0/16", - "207.68.128.0/18", - "208.68.136.0/21", - "208.76.44.0/22", - "208.84.0.0/21", - "209.240.192.0/19", - "213.199.128.0/18", - "216.32.180.0/22", - "216.220.208.0/20" - ], - "windows": [ - "4.128.0.0/12", - "4.144.0.0/12", - "4.160.0.0/12", - "4.176.0.0/12", - "4.192.0.0/12", - "4.208.0.0/12", - "4.224.0.0/12", - "4.240.0.0/12", - "13.64.0.0/11", - "13.96.0.0/13", - "13.104.0.0/14", - "20.0.0.0/11", - "20.33.0.0/16", - "20.34.0.0/15", - "20.36.0.0/14", - "20.40.0.0/13", - "20.48.0.0/12", - "20.64.0.0/10", - "20.128.0.0/16", - "20.130.0.0/16", - "20.135.0.0/16", - "20.136.0.0/16", - "20.140.0.0/15", - "20.143.0.0/16", - "20.144.0.0/14", - "20.150.0.0/15", - "20.152.0.0/16", - "20.153.0.0/16", - "20.157.0.0/16", - "20.158.0.0/15", - "20.160.0.0/12", - "20.176.0.0/14", - "20.180.0.0/14", - "20.184.0.0/13", - "20.192.0.0/10", - "23.96.0.0/13", - "40.64.0.0/10", - "40.162.0.0/16", - "42.159.0.0/16", - "51.4.0.0/15", - "51.8.0.0/16", - "51.10.0.0/15", - "51.12.0.0/15", - "51.18.0.0/16", - "51.51.0.0/16", - "51.53.0.0/16", - "51.103.0.0/16", - "51.104.0.0/15", - "51.107.0.0/16", - "51.116.0.0/16", - "51.120.0.0/16", - "51.124.0.0/16", - "51.132.0.0/16", - "51.136.0.0/15", - "51.138.0.0/16", - "51.140.0.0/14", - "51.144.0.0/15", - "52.96.0.0/12", - "52.112.0.0/14", - "52.120.0.0/14", - "52.125.0.0/16", - "52.126.0.0/15", - "52.130.0.0/15", - "52.132.0.0/14", - "52.136.0.0/13", - "52.145.0.0/16", - "52.146.0.0/15", - "52.148.0.0/14", - "52.152.0.0/13", - "52.160.0.0/11", - "52.224.0.0/11", - "64.4.0.0/18", - "65.52.0.0/14", - "66.119.144.0/20", - "68.18.0.0/15", - "68.154.0.0/15", - "68.210.0.0/15", - "68.218.0.0/15", - "68.220.0.0/15", - "70.37.0.0/17", - "70.37.128.0/18", - "70.152.0.0/15", - "70.156.0.0/15", - "72.144.0.0/14", - "72.152.0.0/14", - "74.160.0.0/14", - "74.176.0.0/14", - "74.224.0.0/14", - "74.234.0.0/15", - "74.240.0.0/14", - "74.248.0.0/15", - "91.190.216.0/21", - "94.245.64.0/18", - "98.64.0.0/14", - "98.70.0.0/15", - "102.37.0.0/16", - "102.133.0.0/16", - "103.9.8.0/22", - "103.25.156.0/24", - "103.25.157.0/24", - "103.25.158.0/23", - "103.36.96.0/22", - "103.255.140.0/22", - "104.40.0.0/13", - "104.146.0.0/15", - "104.208.0.0/13", - "108.140.0.0/14", - "111.221.16.0/20", - "111.221.64.0/18", - "128.94.0.0/16", - "129.75.0.0/16", - "131.107.0.0/16", - 
"131.253.1.0/24", - "131.253.3.0/24", - "131.253.5.0/24", - "131.253.6.0/24", - "131.253.8.0/24", - "131.253.12.0/22", - "131.253.16.0/23", - "131.253.18.0/24", - "131.253.21.0/24", - "131.253.22.0/23", - "131.253.24.0/21", - "131.253.32.0/20", - "131.253.61.0/24", - "131.253.62.0/23", - "131.253.64.0/18", - "131.253.128.0/17", - "132.164.0.0/16", - "132.245.0.0/16", - "134.170.0.0/16", - "134.177.0.0/16", - "135.130.0.0/16", - "135.149.0.0/16", - "137.116.0.0/15", - "137.135.0.0/16", - "138.91.0.0/16", - "138.105.0.0/16", - "138.196.0.0/16", - "138.239.0.0/16", - "139.217.0.0/16", - "139.219.0.0/16", - "141.251.0.0/16", - "143.64.0.0/16", - "146.147.0.0/16", - "147.145.0.0/16", - "147.243.0.0/16", - "148.7.0.0/16", - "150.171.0.0/16", - "150.242.48.0/22", - "155.62.0.0/16", - "157.31.0.0/16", - "157.54.0.0/15", - "157.56.0.0/14", - "157.60.0.0/16", - "158.23.0.0/16", - "158.158.0.0/16", - "159.27.0.0/16", - "159.128.0.0/16", - "163.228.0.0/16", - "167.105.0.0/16", - "167.220.0.0/16", - "168.61.0.0/16", - "168.62.0.0/15", - "169.138.0.0/16", - "170.165.0.0/16", - "172.160.0.0/11", - "172.200.0.0/13", - "172.208.0.0/13", - "191.232.0.0/13", - "192.32.0.0/16", - "192.48.225.0/24", - "192.84.159.0/24", - "192.84.160.0/23", - "192.197.157.0/24", - "192.237.67.0/24", - "193.149.64.0/19", - "193.221.113.0/24", - "194.69.96.0/19", - "194.110.197.0/24", - "195.134.224.0/19", - "198.105.232.0/22", - "198.137.97.0/24", - "198.180.95.0/24", - "198.180.96.0/23", - "198.200.130.0/24", - "198.206.164.0/24", - "199.30.16.0/20", - "199.60.28.0/24", - "199.74.210.0/24", - "199.103.90.0/23", - "199.103.122.0/24", - "199.242.32.0/20", - "199.242.48.0/21", - "202.89.224.0/20", - "204.13.120.0/21", - "204.14.180.0/22", - "204.79.135.0/24", - "204.79.179.0/24", - "204.79.181.0/24", - "204.79.188.0/24", - "204.79.195.0/24", - "204.79.196.0/23", - "204.79.252.0/24", - "204.152.18.0/23", - "204.152.140.0/23", - "204.231.192.0/24", - "204.231.194.0/23", - "204.231.197.0/24", - "204.231.198.0/23", - "204.231.200.0/21", - "204.231.208.0/20", - "204.231.236.0/24", - "205.174.224.0/20", - "206.138.168.0/21", - "206.191.224.0/19", - "207.46.0.0/16", - "207.68.128.0/18", - "208.68.136.0/21", - "208.76.44.0/22", - "208.84.0.0/21", - "209.240.192.0/19", - "213.199.128.0/18", - "216.32.180.0/22", - "216.220.208.0/20" - ] - }, - "linux": { - "adminPasswordSecretName": "shm-blue-vm-admin-password-linux-update-server", - "disks": { - "os": { - "sizeGb": "64", - "type": "Standard_LRS" - } - }, - "hostname": "LINUX-UPDATES-SHM-BLUE", - "ip": "10.0.3.4", - "vmName": "LINUX-UPDATES-SHM-BLUE", - "vmSize": "Standard_B2ms" - }, - "schedule": { - "daily_definition_updates": { - "hour": "01", - "minute": "01" - }, - "weekly_system_updates": { - "day": "Tuesday", - "hour": "02", - "minute": "02" - } - } - } - }, - "name": "Turing Data Safe Haven (Blue)", - "network": { - "vnet": { - "cidr": "10.0.0.0/21", - "name": "VNET_SHM_BLUE", - "rg": "RG_SHM_BLUE_NETWORKING", - "subnets": { - "firewall": { - "cidr": "10.0.2.0/24", - "name": "AzureFirewallSubnet" - }, - "gateway": { - "cidr": "10.0.7.0/24", - "name": "GatewaySubnet" - }, - "identity": { - "cidr": "10.0.0.0/24", - "name": "IdentitySubnet", - "nsg": { - "name": "NSG_SHM_BLUE_IDENTITY", - "rules": "shm-nsg-rules-identity.json" - } - }, - "monitoring": { - "cidr": "10.0.1.0/24", - "name": "MonitoringSubnet", - "nsg": { - "name": "NSG_SHM_BLUE_MONITORING", - "rules": "shm-nsg-rules-monitoring.json" - } - }, - "updateServers": { - "cidr": "10.0.3.0/24", - "name": "UpdateServersSubnet", - 
"nsg": { - "name": "NSG_SHM_BLUE_UPDATE_SERVERS", - "rules": "shm-nsg-rules-update-servers.json" - } - } - } - }, - "vnetRepositoriesTier2": { - "cidr": "10.10.2.0/24", - "name": "VNET_SHM_BLUE_PACKAGE_REPOSITORIES_TIER_2", - "rg": "RG_SHM_BLUE_NETWORKING", - "subnets": { - "deployment": { - "cidr": "10.10.2.0/26", - "name": "RepositoryDeploymentSubnet", - "nsg": { - "name": "NSG_SHM_BLUE_REPOSITORY_DEPLOYMENT_TIER_2", - "rules": "shm-nsg-rules-repository-deployment-tier2.json" - } - }, - "mirrorsExternal": { - "cidr": "10.10.2.64/26", - "name": "RepositoryMirrorsExternalTier2Subnet", - "nsg": { - "name": "NSG_SHM_BLUE_REPOSITORY_MIRRORS_EXTERNAL_TIER2", - "rules": "shm-nsg-rules-repository-mirrors-external-tier2.json" - } - }, - "mirrorsInternal": { - "cidr": "10.10.2.128/26", - "name": "RepositoryMirrorsInternalTier2Subnet", - "nsg": { - "name": "NSG_SHM_BLUE_REPOSITORY_MIRRORS_INTERNAL_TIER2", - "rules": "shm-nsg-rules-repository-mirrors-internal-tier2.json" - } - }, - "proxies": { - "cidr": "10.10.2.192/26", - "name": "RepositoryProxiesTier2Subnet", - "nsg": { - "name": "NSG_SHM_BLUE_REPOSITORY_PROXIES_TIER_2", - "rules": "shm-nsg-rules-repository-proxies-tier2.json" - } - } - } - }, - "vnetRepositoriesTier3": { - "cidr": "10.10.3.0/24", - "name": "VNET_SHM_BLUE_PACKAGE_REPOSITORIES_TIER_3", - "rg": "RG_SHM_BLUE_NETWORKING", - "subnets": { - "deployment": { - "cidr": "10.10.3.0/26", - "name": "RepositoryDeploymentSubnet", - "nsg": { - "name": "NSG_SHM_BLUE_REPOSITORY_DEPLOYMENT_TIER_3", - "rules": "shm-nsg-rules-repository-deployment-tier3.json" - } - }, - "mirrorsExternal": { - "cidr": "10.10.3.64/26", - "name": "RepositoryMirrorsExternalTier3Subnet", - "nsg": { - "name": "NSG_SHM_BLUE_REPOSITORY_MIRRORS_EXTERNAL_TIER3", - "rules": "shm-nsg-rules-repository-mirrors-external-tier3.json" - } - }, - "mirrorsInternal": { - "cidr": "10.10.3.128/26", - "name": "RepositoryMirrorsInternalTier3Subnet", - "nsg": { - "name": "NSG_SHM_BLUE_REPOSITORY_MIRRORS_INTERNAL_TIER3", - "rules": "shm-nsg-rules-repository-mirrors-internal-tier3.json" - } - }, - "proxies": { - "cidr": "10.10.3.192/26", - "name": "RepositoryProxiesTier3Subnet", - "nsg": { - "name": "NSG_SHM_BLUE_REPOSITORY_PROXIES_TIER_3", - "rules": "shm-nsg-rules-repository-proxies-tier3.json" - } - } - } - }, - "vpn": { - "cidr": "172.16.201.0/24" - } - }, - "nps": { - "adminPasswordSecretName": "shm-blue-vm-admin-password-nps", - "disks": { - "os": { - "sizeGb": "128", - "type": "Standard_LRS" - } - }, - "hostname": "NPS-SHM-BLUE", - "installationDirectory": "C:\\Installation", - "ip": "10.0.0.6", - "rg": "RG_SHM_BLUE_NPS", - "vmName": "NPS-SHM-BLUE", - "vmSize": "Standard_D2s_v3" - }, - "nsgPrefix": "NSG_SHM_BLUE", - "organisation": { - "contactEmail": "safehavendevs@turing.ac.uk", - "countryCode": "GB", - "name": "The Alan Turing Institute", - "stateCountyRegion": "London", - "townCity": "London" - }, - "repositories": { - "rg": "RG_SHM_BLUE_PACKAGE_REPOSITORIES", - "tier2": { - "proxies": { - "many": { - "adminPasswordSecretName": "shm-blue-vm-admin-password-many-repository-proxy-tier-2", - "applicationAdminPasswordSecretName": "shm-blue-application-admin-password-many-repository-proxy-tier-2", - "disks": { - "os": { - "sizeGb": 32, - "type": "Standard_LRS" - } - }, - "ipAddress": "10.10.2.198", - "vmName": "SHM-BLUE-MANY-REPOSITORY-PROXY-TIER-2", - "vmSize": "Standard_B2ms" - } - } - }, - "tier3": { - "proxies": { - "many": { - "adminPasswordSecretName": "shm-blue-vm-admin-password-many-repository-proxy-tier-3", - 
"applicationAdminPasswordSecretName": "shm-blue-application-admin-password-many-repository-proxy-tier-3", - "disks": { - "os": { - "sizeGb": 32, - "type": "Standard_LRS" - } - }, - "ipAddress": "10.10.3.198", - "vmName": "SHM-BLUE-MANY-REPOSITORY-PROXY-TIER-3", - "vmSize": "Standard_B2ms" - } - } - } - }, - "rgPrefix": "RG_SHM_BLUE", - "srdImage": { - "bootdiagnostics": { - "accountName": "vmimagesbootdiagcmtvwnes", - "rg": "RG_VMIMAGES_BOOT_DIAGNOSTICS" - }, - "build": { - "nsg": { - "allowedIpAddresses": [ - "193.60.220.240", - "193.60.220.253" - ], - "name": "NSG_VMIMAGES_BUILD_CANDIDATES", - "rules": "vmimages-nsg-rules-build-candidates.json" - }, - "rg": "RG_VMIMAGES_BUILD_CANDIDATES", - "subnet": { - "cidr": "10.48.0.0/24", - "name": "BuildCandidatesSubnet" - }, - "vm": { - "diskSizeGb": 128, - "diskType": "Standard_LRS", - "size": "Standard_F8s_v2" - }, - "vnet": { - "cidr": "10.48.0.0/16", - "name": "VNET_VMIMAGES" - } - }, - "gallery": { - "name": "DATA_SAFE_HAVEN_SHARED_IMAGES", - "rg": "RG_VMIMAGES_GALLERY" - }, - "images": { - "rg": "RG_VMIMAGES_STORAGE" - }, - "keyVault": { - "name": "kv-shm-blue-images", - "rg": "RG_VMIMAGES_SECRETS" - }, - "location": "uksouth", - "network": { - "rg": "RG_VMIMAGES_NETWORKING" - }, - "subscription": "[Prod] Safe Haven VM Images" - }, - "storage": { - "artifacts": { - "accountName": "shmblueartifactsanoblwvs", - "containers": { - "shmArtifactsDC": "shm-artifacts-dc", - "shmArtifactsNPS": "shm-artifacts-nps", - "shmDesiredState": "shm-desired-state" - }, - "rg": "RG_SHM_BLUE_STORAGE" - }, - "bootdiagnostics": { - "accountName": "shmbluebootdiagsanoblwvs", - "rg": "RG_SHM_BLUE_STORAGE" - }, - "persistentdata": { - "rg": "RG_SHM_BLUE_PERSISTENT_DATA" - } - }, - "storageTypeDefault": "Standard_GRS", - "subscriptionName": "Data Safe Haven Development", - "time": { - "ntp": { - "serverAddresses": [ - "216.239.35.0", - "216.239.35.4", - "216.239.35.8", - "216.239.35.12" - ], - "serverFqdns": [ - "time.google.com", - "time1.google.com", - "time2.google.com", - "time3.google.com", - "time4.google.com" - ] - }, - "timezone": { - "linux": "Europe/London", - "windows": "GMT Standard Time" - } - }, - "users": { - "computerManagers": { - "databaseServers": { - "name": "BLUE Database Servers Manager", - "passwordSecretName": "shm-blue-computer-manager-password-database-servers", - "samAccountName": "bluedatabasesrvrs" - }, - "identityServers": { - "name": "BLUE Identity Servers Manager", - "passwordSecretName": "shm-blue-computer-manager-password-identity-servers", - "samAccountName": "blueidentitysrvrs" - }, - "linuxServers": { - "name": "BLUE Linux Servers Manager", - "passwordSecretName": "shm-blue-computer-manager-password-linux-servers", - "samAccountName": "bluelinuxsrvrs" - } - }, - "serviceAccounts": { - "aadLocalSync": { - "name": "BLUE Local AD Sync Administrator", - "passwordSecretName": "shm-blue-aad-localsync-password", - "samAccountName": "bluelocaladsync", - "usernameSecretName": "shm-blue-aad-localsync-username" - } - } - }, - "vmImagesRgPrefix": "RG_VMIMAGES" - }, - "sre": { - "azureAdminGroupName": "Safe Haven Test Admins", - "backup": { - "blob": { - "policy_name": "blobbackuppolicy" - }, - "disk": { - "policy_name": "diskbackuppolicy" - }, - "rg": "RG_SHM_BLUE_SRE_T1GUAC_BACKUP", - "vault": { - "name": "bv-blue-sre-t1guac" - } - }, - "databases": { - "enabled": true, - "instances": [ - { - "adminPasswordSecretName": "sre-t1guac-vm-admin-password-mssql", - "dbAdminPasswordSecretName": "sre-t1guac-db-admin-password-mssql", - 
"dbAdminUsernameSecretName": "sre-t1guac-db-admin-username-mssql", - "disks": { - "data": { - "sizeGb": "1024", - "type": "Standard_LRS" - }, - "os": { - "sizeGb": "128", - "type": "Standard_LRS" - } - }, - "enableSSIS": true, - "ip": "10.151.3.4", - "port": "1433", - "sku": "sqldev-gen2", - "subnet": "databases", - "type": "MSSQL", - "vmName": "MSSQL-T1GUAC", - "vmSize": "Standard_DS2_v2" - }, - { - "adminPasswordSecretName": "sre-t1guac-vm-admin-password-postgresql", - "dbAdminPasswordSecretName": "sre-t1guac-db-admin-password-postgresql", - "dbAdminUsernameSecretName": "sre-t1guac-db-admin-username-postgresql", - "disks": { - "data": { - "sizeGb": "1024", - "type": "Standard_LRS" - }, - "os": { - "sizeGb": "128", - "type": "Standard_LRS" - } - }, - "ip": "10.151.3.5", - "port": "5432", - "sku": "Ubuntu-latest", - "subnet": "databases", - "type": "PostgreSQL", - "vmName": "PSTGRS-T1GUAC", - "vmSize": "Standard_DS2_v2" - } - ], - "rg": "RG_SHM_BLUE_SRE_T1GUAC_DATABASES" - }, - "diskTypeDefault": "Standard_LRS", - "domain": { - "dn": "DC=t1guac,DC=blue,DC=develop,DC=turingsafehaven,DC=ac,DC=uk", - "fqdn": "t1guac.blue.develop.turingsafehaven.ac.uk", - "netbiosName": "T1GUAC", - "securityGroups": { - "dataAdministrators": { - "description": "SG T1GUAC Data Administrators", - "name": "SG T1GUAC Data Administrators" - }, - "researchUsers": { - "description": "SG T1GUAC Research Users", - "name": "SG T1GUAC Research Users" - }, - "systemAdministrators": { - "description": "SG T1GUAC System Administrators", - "name": "SG T1GUAC System Administrators" - } - } - }, - "firewall": { - "routeTableName": "ROUTE-TABLE-SRE-T1GUAC" - }, - "id": "t1guac", - "keyVault": { - "name": "kv-blue-sre-t1guac", - "rg": "RG_SHM_BLUE_SRE_T1GUAC_SECRETS", - "secretNames": { - "adminUsername": "sre-t1guac-vm-admin-username", - "letsEncryptCertificate": "sre-t1guac-lets-encrypt-certificate", - "npsSecret": "sre-t1guac-other-nps-secret" - } - }, - "location": "uksouth", - "network": { - "vnet": { - "cidr": "10.151.0.0/21", - "name": "VNET_SHM_BLUE_SRE_T1GUAC", - "rg": "RG_SHM_BLUE_SRE_T1GUAC_NETWORKING", - "subnets": { - "compute": { - "cidr": "10.151.4.0/24", - "name": "ComputeSubnet", - "nsg": { - "name": "NSG_SHM_BLUE_SRE_T1GUAC_COMPUTE", - "rules": "sre-nsg-rules-compute.json" - } - }, - "data": { - "cidr": "10.151.2.0/24", - "name": "PrivateDataSubnet" - }, - "databases": { - "cidr": "10.151.3.0/24", - "name": "DatabasesSubnet", - "nsg": { - "name": "NSG_SHM_BLUE_SRE_T1GUAC_DATABASES", - "rules": "sre-nsg-rules-databases.json" - } - }, - "deployment": { - "cidr": "10.151.0.0/24", - "name": "DeploymentSubnet", - "nsg": { - "name": "NSG_SHM_BLUE_SRE_T1GUAC_DEPLOYMENT", - "rules": "sre-nsg-rules-deployment.json" - } - }, - "remoteDesktop": { - "cidr": "10.151.1.0/24", - "name": "RemoteDesktopSubnet", - "nsg": { - "name": "NSG_SHM_BLUE_SRE_T1GUAC_GUACAMOLE", - "rules": "sre-nsg-rules-guacamole.json" - } - }, - "webapps": { - "cidr": "10.151.5.0/24", - "name": "WebappsSubnet", - "nsg": { - "name": "NSG_SHM_BLUE_SRE_T1GUAC_WEBAPPS", - "rules": "sre-nsg-rules-webapps.json" - } - } - } - } - }, - "nsgPrefix": "NSG_SHM_BLUE_SRE_T1GUAC", - "remoteDesktop": { - "guacamole": { - "adminPasswordSecretName": "sre-t1guac-vm-admin-password-guacamole", - "databaseAdminPasswordSecretName": "sre-t1guac-db-admin-password-guacamole", - "disks": { - "os": { - "sizeGb": "128", - "type": "Standard_LRS" - } - }, - "fqdn": "guacamole-sre-t1guac.blue.develop.turingsafehaven.ac.uk", - "hostname": "GUACAMOLE-SRE-T1GUAC", - "ip": "10.151.1.4", 
- "vmName": "GUACAMOLE-SRE-T1GUAC", - "vmSize": "Standard_DS2_v2" - }, - "networkRules": { - "allowedSources": "Internet", - "copyAllowed": true, - "includeAzurePlatformDnsRule": false, - "outboundInternet": "Allow", - "pasteAllowed": true - }, - "provider": "ApacheGuacamole", - "rg": "RG_SHM_BLUE_SRE_T1GUAC_REMOTE_DESKTOP" - }, - "repositories": { - "cran": { - "url": "https://cran.r-project.org" - }, - "network": { - "cidr": null, - "name": null - }, - "pypi": { - "host": "pypi.org", - "index": "https://pypi.org", - "indexUrl": "https://pypi.org/simple" - } - }, - "rgPrefix": "RG_SHM_BLUE_SRE_T1GUAC", - "shortName": "sre-t1guac", - "srd": { - "adminPasswordSecretName": "sre-t1guac-vm-admin-password-compute", - "disks": { - "os": { - "sizeGb": "default", - "type": "StandardSSD_LRS" - }, - "scratch": { - "sizeGb": "1024", - "type": "StandardSSD_LRS" - } - }, - "rg": "RG_SHM_BLUE_SRE_T1GUAC_COMPUTE", - "vmImage": { - "type": "Ubuntu", - "version": "20.04.2024032600" - }, - "vmSizeDefault": "Standard_D2s_v3" - }, - "storage": { - "accessPolicies": { - "readOnly": { - "permissions": "rl" - }, - "readWrite": { - "permissions": "racwdl" - } - }, - "artifacts": { - "account": { - "accessTier": "Cool", - "allowedIpAddresses": "any", - "name": "bluet1guacartifactsnrzab", - "performance": "Standard_GRS", - "storageKind": "BlobStorage" - }, - "containers": { - "sreArtifactsRDS": "sre-artifacts-rds", - "sreScriptsRDS": "sre-scripts-rds" - }, - "rg": "RG_SHM_BLUE_SRE_T1GUAC_STORAGE" - }, - "bootdiagnostics": { - "accountName": "bluet1guacbootdiagsnrzab", - "rg": "RG_SHM_BLUE_SRE_T1GUAC_STORAGE" - }, - "persistentdata": { - "account": { - "accessTier": "Hot", - "allowedIpAddresses": "193.60.220.253", - "name": "bluet1guacdatanrzabxpjid", - "performance": "Standard_GRS", - "storageKind": "StorageV2" - }, - "containers": { - "backup": { - "accessPolicyName": "readWrite", - "connectionSecretName": "sre-t1guac-data-backup-connection-readwrite", - "mountType": "BlobSMB" - }, - "egress": { - "accessPolicyName": "readWrite", - "connectionSecretName": "sre-t1guac-data-egress-connection-readwrite", - "mountType": "BlobSMB" - }, - "ingress": { - "accessPolicyName": "readOnly", - "connectionSecretName": "sre-t1guac-data-ingress-connection-readonly", - "mountType": "BlobSMB" - } - } - }, - "userdata": { - "account": { - "accessTier": "Hot", - "name": "bluet1guacuserdatanrzabx", - "performance": "Premium_ZRS", - "rg": "RG_SHM_BLUE_SRE_T1GUAC_STORAGE", - "storageKind": "FileStorage" - }, - "containers": { - "home": { - "accessPolicyName": "readWrite", - "mountType": "NFS", - "sizeGb": "1024" - }, - "shared": { - "accessPolicyName": "readWrite", - "mountType": "NFS", - "sizeGb": "1024" - } - } - } - }, - "storageTypeDefault": "Standard_GRS", - "subscriptionName": "Data Safe Haven Development", - "tier": "1", - "time": { - "timezone": { - "linux": "Europe/London", - "windows": "GMT Standard Time" - } - }, - "users": { - "serviceAccounts": { - "ldapSearch": { - "name": "T1GUAC LDAP Search Service Account", - "passwordSecretName": "sre-t1guac-other-service-account-password-ldap-search", - "samAccountName": "t1guacldapsearch" - }, - "postgres": { - "name": "T1GUAC Postgres DB Service Account", - "passwordSecretName": "sre-t1guac-db-service-account-password-postgres", - "samAccountName": "t1guacdbpostgres" - } - } - }, - "webapps": { - "codimd": { - "adminPasswordSecretName": "sre-t1guac-vm-admin-password-codimd", - "codimd": { - "dockerVersion": "2.5.3" - }, - "disks": { - "data": { - "sizeGb": "512", - "type": 
"Standard_LRS" - }, - "os": { - "sizeGb": "32", - "type": "Standard_LRS" - } - }, - "fqdn": "codimd.t1guac.blue.develop.turingsafehaven.ac.uk", - "hostname": "CODIMD", - "ip": "10.151.5.6", - "osVersion": "Ubuntu-latest", - "postgres": { - "dockerVersion": "16-alpine", - "passwordSecretName": "sre-t1guac-other-codimd-password-postgresdb" - }, - "vmName": "CODIMD-SRE-T1GUAC", - "vmSize": "Standard_D2s_v3" - }, - "gitlab": { - "adminPasswordSecretName": "sre-t1guac-vm-admin-password-gitlab", - "disks": { - "data": { - "sizeGb": "512", - "type": "Standard_LRS" - }, - "os": { - "sizeGb": "32", - "type": "Standard_LRS" - } - }, - "fqdn": "gitlab.t1guac.blue.develop.turingsafehaven.ac.uk", - "hostname": "GITLAB", - "ip": "10.151.5.5", - "osVersion": "Ubuntu-latest", - "rootPasswordSecretName": "sre-t1guac-other-gitlab-root-password", - "vmName": "GITLAB-SRE-T1GUAC", - "vmSize": "Standard_D2s_v3" - }, - "rg": "RG_SHM_BLUE_SRE_T1GUAC_WEBAPPS" - } - } -} diff --git a/tests/resources/sre_bluet3guac_full_config.json b/tests/resources/sre_bluet3guac_full_config.json deleted file mode 100644 index 4baf01f309..0000000000 --- a/tests/resources/sre_bluet3guac_full_config.json +++ /dev/null @@ -1,1416 +0,0 @@ -{ - "shm": { - "azureAdminGroupName": "Safe Haven Test Admins", - "azureAdTenantId": "1d93a235-0c16-4f39-adff-a550719bdb49", - "dc": { - "adDirectory": "C:\\ActiveDirectory", - "disks": { - "os": { - "sizeGb": "128", - "type": "Standard_LRS" - } - }, - "external_dns_resolver": "168.63.129.16", - "fqdn": "dc1-shm-blue.blue.develop.turingsafehaven.ac.uk", - "hostname": "DC1-SHM-BLUE", - "hostnameLower": "dc1-shm-blue", - "hostnameUpper": "DC1-SHM-BLUE", - "installationDirectory": "C:\\Installation", - "ip": "10.0.0.4", - "rg": "RG_SHM_BLUE_DC", - "safemodePasswordSecretName": "shm-blue-vm-safemode-password-dc", - "vmName": "DC1-SHM-BLUE", - "vmSize": "Standard_D2s_v3" - }, - "dcb": { - "fqdn": "dc2-shm-blue.blue.develop.turingsafehaven.ac.uk", - "hostname": "DC2-SHM-BLUE", - "ip": "10.0.0.5", - "vmName": "DC2-SHM-BLUE" - }, - "diskTypeDefault": "Standard_LRS", - "dns": { - "rg": "RG_SHM_DNS_TEST", - "subscriptionName": "[Prod] Safe Haven Domains" - }, - "dockerAccount": "youraccountname", - "dockerPassword": "dckr_pat_0o0o0o0o0o0o0o0o0o0o0o0o0o0", - "domain": { - "dn": "DC=blue,DC=develop,DC=turingsafehaven,DC=ac,DC=uk", - "fqdn": "blue.develop.turingsafehaven.ac.uk", - "fqdnLower": "blue.develop.turingsafehaven.ac.uk", - "fqdnUpper": "BLUE.DEVELOP.TURINGSAFEHAVEN.AC.UK", - "netbiosName": "BLUE", - "ous": { - "databaseServers": { - "name": "Secure Research Environment Database Servers", - "path": "OU=Secure Research Environment Database Servers,DC=blue,DC=develop,DC=turingsafehaven,DC=ac,DC=uk" - }, - "identityServers": { - "name": "Safe Haven Identity Servers", - "path": "OU=Safe Haven Identity Servers,DC=blue,DC=develop,DC=turingsafehaven,DC=ac,DC=uk" - }, - "linuxServers": { - "name": "Secure Research Environment Linux Servers", - "path": "OU=Secure Research Environment Linux Servers,DC=blue,DC=develop,DC=turingsafehaven,DC=ac,DC=uk" - }, - "researchUsers": { - "name": "Safe Haven Research Users", - "path": "OU=Safe Haven Research Users,DC=blue,DC=develop,DC=turingsafehaven,DC=ac,DC=uk" - }, - "securityGroups": { - "name": "Safe Haven Security Groups", - "path": "OU=Safe Haven Security Groups,DC=blue,DC=develop,DC=turingsafehaven,DC=ac,DC=uk" - }, - "serviceAccounts": { - "name": "Safe Haven Service Accounts", - "path": "OU=Safe Haven Service 
Accounts,DC=blue,DC=develop,DC=turingsafehaven,DC=ac,DC=uk" - } - }, - "securityGroups": { - "computerManagers": { - "description": "SG Safe Haven Computer Management Users", - "name": "SG Safe Haven Computer Management Users" - }, - "serverAdmins": { - "description": "SG Safe Haven Server Administrators", - "name": "SG Safe Haven Server Administrators" - } - } - }, - "firewall": { - "name": "FIREWALL-SHM-BLUE", - "routeTableName": "ROUTE-TABLE-SHM-BLUE" - }, - "id": "blue", - "keyVault": { - "name": "kv-shm-blue", - "rg": "RG_SHM_BLUE_SECRETS", - "secretNames": { - "aadEmergencyAdminPassword": "shm-blue-aad-emergency-admin-password", - "aadEmergencyAdminUsername": "shm-blue-aad-emergency-admin-username", - "buildImageAdminPassword": "shm-blue-buildimage-admin-password", - "buildImageAdminUsername": "shm-blue-buildimage-admin-username", - "domainAdminPassword": "shm-blue-domain-admin-password", - "domainAdminUsername": "shm-blue-domain-admin-username", - "vmAdminUsername": "shm-blue-vm-admin-username", - "vpnCaCertificate": "shm-blue-vpn-ca-cert", - "vpnCaCertificatePlain": "shm-blue-vpn-ca-cert-plain", - "vpnCaCertPassword": "shm-blue-vpn-ca-cert-password", - "vpnClientCertificate": "shm-blue-vpn-client-cert", - "vpnClientCertPassword": "shm-blue-vpn-client-cert-password" - } - }, - "location": "uksouth", - "monitoring": { - "automationAccount": { - "name": "shm-blue-automation" - }, - "loggingWorkspace": { - "name": "shm-blue-loganalytics" - }, - "privatelink": { - "name": "shm-blue-privatelinkscope" - }, - "rg": "RG_SHM_BLUE_MONITORING", - "updateServers": { - "externalIpAddresses": { - "linux": [ - "72.32.157.246", - "87.238.57.227", - "147.75.85.69", - "217.196.149.55", - "91.189.88.0/21", - "185.125.188.0/22", - "103.21.244.0/22", - "103.22.200.0/22", - "103.31.4.0/22", - "104.16.0.0/13", - "104.24.0.0/14", - "108.162.192.0/18", - "131.0.72.0/22", - "141.101.64.0/18", - "162.158.0.0/15", - "172.64.0.0/13", - "173.245.48.0/20", - "188.114.96.0/20", - "190.93.240.0/20", - "197.234.240.0/22", - "198.41.128.0/17", - "3.10.17.128/25", - "3.101.158.0/23", - "3.11.53.0/24", - "3.128.93.0/24", - "3.134.215.0/24", - "3.231.2.0/25", - "3.234.232.224/27", - "3.236.169.192/26", - "3.236.48.0/23", - "3.35.130.128/25", - "13.113.196.64/26", - "13.113.203.0/24", - "13.124.199.0/24", - "13.210.67.128/26", - "13.224.0.0/14", - "13.228.69.0/24", - "13.233.177.192/26", - "13.249.0.0/16", - "13.32.0.0/15", - "13.35.0.0/16", - "13.48.32.0/24", - "13.54.63.128/26", - "13.59.250.0/26", - "15.158.0.0/16", - "15.188.184.0/24", - "15.207.13.128/25", - "15.207.213.128/25", - "18.154.0.0/15", - "18.160.0.0/15", - "18.164.0.0/15", - "18.172.0.0/15", - "18.192.142.0/23", - "18.200.212.0/23", - "18.216.170.128/25", - "18.229.220.192/26", - "18.238.0.0/15", - "18.244.0.0/15", - "18.64.0.0/14", - "18.68.0.0/16", - "34.195.252.0/24", - "34.216.51.0/25", - "34.223.12.224/27", - "34.223.80.192/26", - "34.226.14.0/24", - "35.158.136.0/24", - "35.162.63.192/26", - "35.167.191.128/26", - "36.103.232.0/25", - "36.103.232.128/26", - "44.227.178.0/24", - "44.234.108.128/25", - "44.234.90.252/30", - "52.124.128.0/17", - "52.15.127.128/26", - "52.199.127.192/26", - "52.212.248.0/26", - "52.220.191.0/26", - "52.222.128.0/17", - "52.46.0.0/18", - "52.47.139.0/24", - "52.52.191.128/26", - "52.56.127.0/25", - "52.57.254.0/24", - "52.66.194.128/26", - "52.78.247.128/26", - "52.82.128.0/19", - "52.84.0.0/15", - "54.182.0.0/16", - "54.192.0.0/16", - "54.230.0.0/17", - "54.230.128.0/18", - "54.230.200.0/21", - "54.230.208.0/20", - 
"54.230.224.0/19", - "54.233.255.128/26", - "54.239.128.0/18", - "54.239.192.0/19", - "54.240.128.0/18", - "58.254.138.0/25", - "58.254.138.128/26", - "64.252.128.0/18", - "64.252.64.0/18", - "65.8.0.0/16", - "65.9.0.0/17", - "65.9.128.0/18", - "70.132.0.0/18", - "71.152.0.0/17", - "99.79.169.0/24", - "99.84.0.0/16", - "99.86.0.0/16", - "108.138.0.0/15", - "108.156.0.0/14", - "116.129.226.0/25", - "116.129.226.128/26", - "118.193.97.128/25", - "118.193.97.64/26", - "119.147.182.0/25", - "119.147.182.128/26", - "120.232.236.0/25", - "120.232.236.128/26", - "120.253.240.192/26", - "120.253.241.160/27", - "120.253.245.128/26", - "120.253.245.192/27", - "120.52.12.64/26", - "120.52.153.192/26", - "120.52.22.96/27", - "120.52.39.128/27", - "130.176.0.0/17", - "130.176.128.0/18", - "130.176.192.0/19", - "130.176.224.0/20", - "143.204.0.0/16", - "144.220.0.0/16", - "180.163.57.0/25", - "180.163.57.128/26", - "204.246.164.0/22", - "204.246.168.0/22", - "204.246.172.0/24", - "204.246.173.0/24", - "204.246.174.0/23", - "204.246.176.0/20", - "205.251.200.0/21", - "205.251.208.0/20", - "205.251.249.0/24", - "205.251.250.0/23", - "205.251.252.0/23", - "205.251.254.0/24", - "216.137.32.0/19", - "223.71.11.0/27", - "223.71.71.128/25", - "223.71.71.96/27", - "104.131.190.124", - "152.199.20.126", - "4.128.0.0/12", - "4.144.0.0/12", - "4.160.0.0/12", - "4.176.0.0/12", - "4.192.0.0/12", - "4.208.0.0/12", - "4.224.0.0/12", - "4.240.0.0/12", - "13.64.0.0/11", - "13.96.0.0/13", - "13.104.0.0/14", - "20.0.0.0/11", - "20.33.0.0/16", - "20.34.0.0/15", - "20.36.0.0/14", - "20.40.0.0/13", - "20.48.0.0/12", - "20.64.0.0/10", - "20.128.0.0/16", - "20.130.0.0/16", - "20.135.0.0/16", - "20.136.0.0/16", - "20.140.0.0/15", - "20.143.0.0/16", - "20.144.0.0/14", - "20.150.0.0/15", - "20.152.0.0/16", - "20.153.0.0/16", - "20.157.0.0/16", - "20.158.0.0/15", - "20.160.0.0/12", - "20.176.0.0/14", - "20.180.0.0/14", - "20.184.0.0/13", - "20.192.0.0/10", - "23.96.0.0/13", - "40.64.0.0/10", - "40.162.0.0/16", - "42.159.0.0/16", - "51.4.0.0/15", - "51.8.0.0/16", - "51.10.0.0/15", - "51.12.0.0/15", - "51.18.0.0/16", - "51.51.0.0/16", - "51.53.0.0/16", - "51.103.0.0/16", - "51.104.0.0/15", - "51.107.0.0/16", - "51.116.0.0/16", - "51.120.0.0/16", - "51.124.0.0/16", - "51.132.0.0/16", - "51.136.0.0/15", - "51.138.0.0/16", - "51.140.0.0/14", - "51.144.0.0/15", - "52.96.0.0/12", - "52.112.0.0/14", - "52.120.0.0/14", - "52.125.0.0/16", - "52.126.0.0/15", - "52.130.0.0/15", - "52.132.0.0/14", - "52.136.0.0/13", - "52.145.0.0/16", - "52.146.0.0/15", - "52.148.0.0/14", - "52.152.0.0/13", - "52.160.0.0/11", - "52.224.0.0/11", - "64.4.0.0/18", - "65.52.0.0/14", - "66.119.144.0/20", - "68.18.0.0/15", - "68.154.0.0/15", - "68.210.0.0/15", - "68.218.0.0/15", - "68.220.0.0/15", - "70.37.0.0/17", - "70.37.128.0/18", - "70.152.0.0/15", - "70.156.0.0/15", - "72.144.0.0/14", - "72.152.0.0/14", - "74.160.0.0/14", - "74.176.0.0/14", - "74.224.0.0/14", - "74.234.0.0/15", - "74.240.0.0/14", - "74.248.0.0/15", - "91.190.216.0/21", - "94.245.64.0/18", - "98.64.0.0/14", - "98.70.0.0/15", - "102.37.0.0/16", - "102.133.0.0/16", - "103.9.8.0/22", - "103.25.156.0/24", - "103.25.157.0/24", - "103.25.158.0/23", - "103.36.96.0/22", - "103.255.140.0/22", - "104.40.0.0/13", - "104.146.0.0/15", - "104.208.0.0/13", - "108.140.0.0/14", - "111.221.16.0/20", - "111.221.64.0/18", - "128.94.0.0/16", - "129.75.0.0/16", - "131.107.0.0/16", - "131.253.1.0/24", - "131.253.3.0/24", - "131.253.5.0/24", - "131.253.6.0/24", - "131.253.8.0/24", - "131.253.12.0/22", - 
"131.253.16.0/23", - "131.253.18.0/24", - "131.253.21.0/24", - "131.253.22.0/23", - "131.253.24.0/21", - "131.253.32.0/20", - "131.253.61.0/24", - "131.253.62.0/23", - "131.253.64.0/18", - "131.253.128.0/17", - "132.164.0.0/16", - "132.245.0.0/16", - "134.170.0.0/16", - "134.177.0.0/16", - "135.130.0.0/16", - "135.149.0.0/16", - "137.116.0.0/15", - "137.135.0.0/16", - "138.91.0.0/16", - "138.105.0.0/16", - "138.196.0.0/16", - "138.239.0.0/16", - "139.217.0.0/16", - "139.219.0.0/16", - "141.251.0.0/16", - "143.64.0.0/16", - "146.147.0.0/16", - "147.145.0.0/16", - "147.243.0.0/16", - "148.7.0.0/16", - "150.171.0.0/16", - "150.242.48.0/22", - "155.62.0.0/16", - "157.31.0.0/16", - "157.54.0.0/15", - "157.56.0.0/14", - "157.60.0.0/16", - "158.23.0.0/16", - "158.158.0.0/16", - "159.27.0.0/16", - "159.128.0.0/16", - "163.228.0.0/16", - "167.105.0.0/16", - "167.220.0.0/16", - "168.61.0.0/16", - "168.62.0.0/15", - "169.138.0.0/16", - "170.165.0.0/16", - "172.160.0.0/11", - "172.200.0.0/13", - "172.208.0.0/13", - "191.232.0.0/13", - "192.32.0.0/16", - "192.48.225.0/24", - "192.84.159.0/24", - "192.84.160.0/23", - "192.197.157.0/24", - "192.237.67.0/24", - "193.149.64.0/19", - "193.221.113.0/24", - "194.69.96.0/19", - "194.110.197.0/24", - "195.134.224.0/19", - "198.105.232.0/22", - "198.137.97.0/24", - "198.180.95.0/24", - "198.180.96.0/23", - "198.200.130.0/24", - "198.206.164.0/24", - "199.30.16.0/20", - "199.60.28.0/24", - "199.74.210.0/24", - "199.103.90.0/23", - "199.103.122.0/24", - "199.242.32.0/20", - "199.242.48.0/21", - "202.89.224.0/20", - "204.13.120.0/21", - "204.14.180.0/22", - "204.79.135.0/24", - "204.79.179.0/24", - "204.79.181.0/24", - "204.79.188.0/24", - "204.79.195.0/24", - "204.79.196.0/23", - "204.79.252.0/24", - "204.152.18.0/23", - "204.152.140.0/23", - "204.231.192.0/24", - "204.231.194.0/23", - "204.231.197.0/24", - "204.231.198.0/23", - "204.231.200.0/21", - "204.231.208.0/20", - "204.231.236.0/24", - "205.174.224.0/20", - "206.138.168.0/21", - "206.191.224.0/19", - "207.46.0.0/16", - "207.68.128.0/18", - "208.68.136.0/21", - "208.76.44.0/22", - "208.84.0.0/21", - "209.240.192.0/19", - "213.199.128.0/18", - "216.32.180.0/22", - "216.220.208.0/20" - ], - "windows": [ - "4.128.0.0/12", - "4.144.0.0/12", - "4.160.0.0/12", - "4.176.0.0/12", - "4.192.0.0/12", - "4.208.0.0/12", - "4.224.0.0/12", - "4.240.0.0/12", - "13.64.0.0/11", - "13.96.0.0/13", - "13.104.0.0/14", - "20.0.0.0/11", - "20.33.0.0/16", - "20.34.0.0/15", - "20.36.0.0/14", - "20.40.0.0/13", - "20.48.0.0/12", - "20.64.0.0/10", - "20.128.0.0/16", - "20.130.0.0/16", - "20.135.0.0/16", - "20.136.0.0/16", - "20.140.0.0/15", - "20.143.0.0/16", - "20.144.0.0/14", - "20.150.0.0/15", - "20.152.0.0/16", - "20.153.0.0/16", - "20.157.0.0/16", - "20.158.0.0/15", - "20.160.0.0/12", - "20.176.0.0/14", - "20.180.0.0/14", - "20.184.0.0/13", - "20.192.0.0/10", - "23.96.0.0/13", - "40.64.0.0/10", - "40.162.0.0/16", - "42.159.0.0/16", - "51.4.0.0/15", - "51.8.0.0/16", - "51.10.0.0/15", - "51.12.0.0/15", - "51.18.0.0/16", - "51.51.0.0/16", - "51.53.0.0/16", - "51.103.0.0/16", - "51.104.0.0/15", - "51.107.0.0/16", - "51.116.0.0/16", - "51.120.0.0/16", - "51.124.0.0/16", - "51.132.0.0/16", - "51.136.0.0/15", - "51.138.0.0/16", - "51.140.0.0/14", - "51.144.0.0/15", - "52.96.0.0/12", - "52.112.0.0/14", - "52.120.0.0/14", - "52.125.0.0/16", - "52.126.0.0/15", - "52.130.0.0/15", - "52.132.0.0/14", - "52.136.0.0/13", - "52.145.0.0/16", - "52.146.0.0/15", - "52.148.0.0/14", - "52.152.0.0/13", - "52.160.0.0/11", - "52.224.0.0/11", - 
"64.4.0.0/18", - "65.52.0.0/14", - "66.119.144.0/20", - "68.18.0.0/15", - "68.154.0.0/15", - "68.210.0.0/15", - "68.218.0.0/15", - "68.220.0.0/15", - "70.37.0.0/17", - "70.37.128.0/18", - "70.152.0.0/15", - "70.156.0.0/15", - "72.144.0.0/14", - "72.152.0.0/14", - "74.160.0.0/14", - "74.176.0.0/14", - "74.224.0.0/14", - "74.234.0.0/15", - "74.240.0.0/14", - "74.248.0.0/15", - "91.190.216.0/21", - "94.245.64.0/18", - "98.64.0.0/14", - "98.70.0.0/15", - "102.37.0.0/16", - "102.133.0.0/16", - "103.9.8.0/22", - "103.25.156.0/24", - "103.25.157.0/24", - "103.25.158.0/23", - "103.36.96.0/22", - "103.255.140.0/22", - "104.40.0.0/13", - "104.146.0.0/15", - "104.208.0.0/13", - "108.140.0.0/14", - "111.221.16.0/20", - "111.221.64.0/18", - "128.94.0.0/16", - "129.75.0.0/16", - "131.107.0.0/16", - "131.253.1.0/24", - "131.253.3.0/24", - "131.253.5.0/24", - "131.253.6.0/24", - "131.253.8.0/24", - "131.253.12.0/22", - "131.253.16.0/23", - "131.253.18.0/24", - "131.253.21.0/24", - "131.253.22.0/23", - "131.253.24.0/21", - "131.253.32.0/20", - "131.253.61.0/24", - "131.253.62.0/23", - "131.253.64.0/18", - "131.253.128.0/17", - "132.164.0.0/16", - "132.245.0.0/16", - "134.170.0.0/16", - "134.177.0.0/16", - "135.130.0.0/16", - "135.149.0.0/16", - "137.116.0.0/15", - "137.135.0.0/16", - "138.91.0.0/16", - "138.105.0.0/16", - "138.196.0.0/16", - "138.239.0.0/16", - "139.217.0.0/16", - "139.219.0.0/16", - "141.251.0.0/16", - "143.64.0.0/16", - "146.147.0.0/16", - "147.145.0.0/16", - "147.243.0.0/16", - "148.7.0.0/16", - "150.171.0.0/16", - "150.242.48.0/22", - "155.62.0.0/16", - "157.31.0.0/16", - "157.54.0.0/15", - "157.56.0.0/14", - "157.60.0.0/16", - "158.23.0.0/16", - "158.158.0.0/16", - "159.27.0.0/16", - "159.128.0.0/16", - "163.228.0.0/16", - "167.105.0.0/16", - "167.220.0.0/16", - "168.61.0.0/16", - "168.62.0.0/15", - "169.138.0.0/16", - "170.165.0.0/16", - "172.160.0.0/11", - "172.200.0.0/13", - "172.208.0.0/13", - "191.232.0.0/13", - "192.32.0.0/16", - "192.48.225.0/24", - "192.84.159.0/24", - "192.84.160.0/23", - "192.197.157.0/24", - "192.237.67.0/24", - "193.149.64.0/19", - "193.221.113.0/24", - "194.69.96.0/19", - "194.110.197.0/24", - "195.134.224.0/19", - "198.105.232.0/22", - "198.137.97.0/24", - "198.180.95.0/24", - "198.180.96.0/23", - "198.200.130.0/24", - "198.206.164.0/24", - "199.30.16.0/20", - "199.60.28.0/24", - "199.74.210.0/24", - "199.103.90.0/23", - "199.103.122.0/24", - "199.242.32.0/20", - "199.242.48.0/21", - "202.89.224.0/20", - "204.13.120.0/21", - "204.14.180.0/22", - "204.79.135.0/24", - "204.79.179.0/24", - "204.79.181.0/24", - "204.79.188.0/24", - "204.79.195.0/24", - "204.79.196.0/23", - "204.79.252.0/24", - "204.152.18.0/23", - "204.152.140.0/23", - "204.231.192.0/24", - "204.231.194.0/23", - "204.231.197.0/24", - "204.231.198.0/23", - "204.231.200.0/21", - "204.231.208.0/20", - "204.231.236.0/24", - "205.174.224.0/20", - "206.138.168.0/21", - "206.191.224.0/19", - "207.46.0.0/16", - "207.68.128.0/18", - "208.68.136.0/21", - "208.76.44.0/22", - "208.84.0.0/21", - "209.240.192.0/19", - "213.199.128.0/18", - "216.32.180.0/22", - "216.220.208.0/20" - ] - }, - "linux": { - "adminPasswordSecretName": "shm-blue-vm-admin-password-linux-update-server", - "disks": { - "os": { - "sizeGb": "64", - "type": "Standard_LRS" - } - }, - "hostname": "LINUX-UPDATES-SHM-BLUE", - "ip": "10.0.3.4", - "vmName": "LINUX-UPDATES-SHM-BLUE", - "vmSize": "Standard_B2ms" - }, - "schedule": { - "daily_definition_updates": { - "hour": "01", - "minute": "01" - }, - "weekly_system_updates": { - "day": 
"Tuesday", - "hour": "02", - "minute": "02" - } - } - } - }, - "name": "Turing Data Safe Haven (Blue)", - "network": { - "vnet": { - "cidr": "10.0.0.0/21", - "name": "VNET_SHM_BLUE", - "rg": "RG_SHM_BLUE_NETWORKING", - "subnets": { - "firewall": { - "cidr": "10.0.2.0/24", - "name": "AzureFirewallSubnet" - }, - "gateway": { - "cidr": "10.0.7.0/24", - "name": "GatewaySubnet" - }, - "identity": { - "cidr": "10.0.0.0/24", - "name": "IdentitySubnet", - "nsg": { - "name": "NSG_SHM_BLUE_IDENTITY", - "rules": "shm-nsg-rules-identity.json" - } - }, - "monitoring": { - "cidr": "10.0.1.0/24", - "name": "MonitoringSubnet", - "nsg": { - "name": "NSG_SHM_BLUE_MONITORING", - "rules": "shm-nsg-rules-monitoring.json" - } - }, - "updateServers": { - "cidr": "10.0.3.0/24", - "name": "UpdateServersSubnet", - "nsg": { - "name": "NSG_SHM_BLUE_UPDATE_SERVERS", - "rules": "shm-nsg-rules-update-servers.json" - } - } - } - }, - "vnetRepositoriesTier2": { - "cidr": "10.10.2.0/24", - "name": "VNET_SHM_BLUE_PACKAGE_REPOSITORIES_TIER_2", - "rg": "RG_SHM_BLUE_NETWORKING", - "subnets": { - "deployment": { - "cidr": "10.10.2.0/26", - "name": "RepositoryDeploymentSubnet", - "nsg": { - "name": "NSG_SHM_BLUE_REPOSITORY_DEPLOYMENT_TIER_2", - "rules": "shm-nsg-rules-repository-deployment-tier2.json" - } - }, - "mirrorsExternal": { - "cidr": "10.10.2.64/26", - "name": "RepositoryMirrorsExternalTier2Subnet", - "nsg": { - "name": "NSG_SHM_BLUE_REPOSITORY_MIRRORS_EXTERNAL_TIER2", - "rules": "shm-nsg-rules-repository-mirrors-external-tier2.json" - } - }, - "mirrorsInternal": { - "cidr": "10.10.2.128/26", - "name": "RepositoryMirrorsInternalTier2Subnet", - "nsg": { - "name": "NSG_SHM_BLUE_REPOSITORY_MIRRORS_INTERNAL_TIER2", - "rules": "shm-nsg-rules-repository-mirrors-internal-tier2.json" - } - }, - "proxies": { - "cidr": "10.10.2.192/26", - "name": "RepositoryProxiesTier2Subnet", - "nsg": { - "name": "NSG_SHM_BLUE_REPOSITORY_PROXIES_TIER_2", - "rules": "shm-nsg-rules-repository-proxies-tier2.json" - } - } - } - }, - "vnetRepositoriesTier3": { - "cidr": "10.10.3.0/24", - "name": "VNET_SHM_BLUE_PACKAGE_REPOSITORIES_TIER_3", - "rg": "RG_SHM_BLUE_NETWORKING", - "subnets": { - "deployment": { - "cidr": "10.10.3.0/26", - "name": "RepositoryDeploymentSubnet", - "nsg": { - "name": "NSG_SHM_BLUE_REPOSITORY_DEPLOYMENT_TIER_3", - "rules": "shm-nsg-rules-repository-deployment-tier3.json" - } - }, - "mirrorsExternal": { - "cidr": "10.10.3.64/26", - "name": "RepositoryMirrorsExternalTier3Subnet", - "nsg": { - "name": "NSG_SHM_BLUE_REPOSITORY_MIRRORS_EXTERNAL_TIER3", - "rules": "shm-nsg-rules-repository-mirrors-external-tier3.json" - } - }, - "mirrorsInternal": { - "cidr": "10.10.3.128/26", - "name": "RepositoryMirrorsInternalTier3Subnet", - "nsg": { - "name": "NSG_SHM_BLUE_REPOSITORY_MIRRORS_INTERNAL_TIER3", - "rules": "shm-nsg-rules-repository-mirrors-internal-tier3.json" - } - }, - "proxies": { - "cidr": "10.10.3.192/26", - "name": "RepositoryProxiesTier3Subnet", - "nsg": { - "name": "NSG_SHM_BLUE_REPOSITORY_PROXIES_TIER_3", - "rules": "shm-nsg-rules-repository-proxies-tier3.json" - } - } - } - }, - "vpn": { - "cidr": "172.16.201.0/24" - } - }, - "nps": { - "adminPasswordSecretName": "shm-blue-vm-admin-password-nps", - "disks": { - "os": { - "sizeGb": "128", - "type": "Standard_LRS" - } - }, - "hostname": "NPS-SHM-BLUE", - "installationDirectory": "C:\\Installation", - "ip": "10.0.0.6", - "rg": "RG_SHM_BLUE_NPS", - "vmName": "NPS-SHM-BLUE", - "vmSize": "Standard_D2s_v3" - }, - "nsgPrefix": "NSG_SHM_BLUE", - "organisation": { - "contactEmail": 
"safehavendevs@turing.ac.uk", - "countryCode": "GB", - "name": "The Alan Turing Institute", - "stateCountyRegion": "London", - "townCity": "London" - }, - "repositories": { - "rg": "RG_SHM_BLUE_PACKAGE_REPOSITORIES", - "tier2": { - "proxies": { - "many": { - "adminPasswordSecretName": "shm-blue-vm-admin-password-many-repository-proxy-tier-2", - "applicationAdminPasswordSecretName": "shm-blue-application-admin-password-many-repository-proxy-tier-2", - "disks": { - "os": { - "sizeGb": 32, - "type": "Standard_LRS" - } - }, - "ipAddress": "10.10.2.198", - "vmName": "SHM-BLUE-MANY-REPOSITORY-PROXY-TIER-2", - "vmSize": "Standard_B2ms" - } - } - }, - "tier3": { - "proxies": { - "many": { - "adminPasswordSecretName": "shm-blue-vm-admin-password-many-repository-proxy-tier-3", - "applicationAdminPasswordSecretName": "shm-blue-application-admin-password-many-repository-proxy-tier-3", - "disks": { - "os": { - "sizeGb": 32, - "type": "Standard_LRS" - } - }, - "ipAddress": "10.10.3.198", - "vmName": "SHM-BLUE-MANY-REPOSITORY-PROXY-TIER-3", - "vmSize": "Standard_B2ms" - } - } - } - }, - "rgPrefix": "RG_SHM_BLUE", - "srdImage": { - "bootdiagnostics": { - "accountName": "vmimagesbootdiagcmtvwnes", - "rg": "RG_VMIMAGES_BOOT_DIAGNOSTICS" - }, - "build": { - "nsg": { - "allowedIpAddresses": [ - "193.60.220.240", - "193.60.220.253" - ], - "name": "NSG_VMIMAGES_BUILD_CANDIDATES", - "rules": "vmimages-nsg-rules-build-candidates.json" - }, - "rg": "RG_VMIMAGES_BUILD_CANDIDATES", - "subnet": { - "cidr": "10.48.0.0/24", - "name": "BuildCandidatesSubnet" - }, - "vm": { - "diskSizeGb": 128, - "diskType": "Standard_LRS", - "size": "Standard_F8s_v2" - }, - "vnet": { - "cidr": "10.48.0.0/16", - "name": "VNET_VMIMAGES" - } - }, - "gallery": { - "name": "DATA_SAFE_HAVEN_SHARED_IMAGES", - "rg": "RG_VMIMAGES_GALLERY" - }, - "images": { - "rg": "RG_VMIMAGES_STORAGE" - }, - "keyVault": { - "name": "kv-shm-blue-images", - "rg": "RG_VMIMAGES_SECRETS" - }, - "location": "uksouth", - "network": { - "rg": "RG_VMIMAGES_NETWORKING" - }, - "subscription": "[Prod] Safe Haven VM Images" - }, - "storage": { - "artifacts": { - "accountName": "shmblueartifactsanoblwvs", - "containers": { - "shmArtifactsDC": "shm-artifacts-dc", - "shmArtifactsNPS": "shm-artifacts-nps", - "shmDesiredState": "shm-desired-state" - }, - "rg": "RG_SHM_BLUE_STORAGE" - }, - "bootdiagnostics": { - "accountName": "shmbluebootdiagsanoblwvs", - "rg": "RG_SHM_BLUE_STORAGE" - }, - "persistentdata": { - "rg": "RG_SHM_BLUE_PERSISTENT_DATA" - } - }, - "storageTypeDefault": "Standard_GRS", - "subscriptionName": "Data Safe Haven Development", - "time": { - "ntp": { - "serverAddresses": [ - "216.239.35.0", - "216.239.35.4", - "216.239.35.8", - "216.239.35.12" - ], - "serverFqdns": [ - "time.google.com", - "time1.google.com", - "time2.google.com", - "time3.google.com", - "time4.google.com" - ] - }, - "timezone": { - "linux": "Europe/London", - "windows": "GMT Standard Time" - } - }, - "users": { - "computerManagers": { - "databaseServers": { - "name": "BLUE Database Servers Manager", - "passwordSecretName": "shm-blue-computer-manager-password-database-servers", - "samAccountName": "bluedatabasesrvrs" - }, - "identityServers": { - "name": "BLUE Identity Servers Manager", - "passwordSecretName": "shm-blue-computer-manager-password-identity-servers", - "samAccountName": "blueidentitysrvrs" - }, - "linuxServers": { - "name": "BLUE Linux Servers Manager", - "passwordSecretName": "shm-blue-computer-manager-password-linux-servers", - "samAccountName": "bluelinuxsrvrs" - } - }, - 
"serviceAccounts": { - "aadLocalSync": { - "name": "BLUE Local AD Sync Administrator", - "passwordSecretName": "shm-blue-aad-localsync-password", - "samAccountName": "bluelocaladsync", - "usernameSecretName": "shm-blue-aad-localsync-username" - } - } - }, - "vmImagesRgPrefix": "RG_VMIMAGES" - }, - "sre": { - "azureAdminGroupName": "Safe Haven Test Admins", - "backup": { - "blob": { - "policy_name": "blobbackuppolicy" - }, - "disk": { - "policy_name": "diskbackuppolicy" - }, - "rg": "RG_SHM_BLUE_SRE_T3GUAC_BACKUP", - "vault": { - "name": "bv-blue-sre-t3guac" - } - }, - "databases": { - "enabled": true, - "instances": [ - { - "adminPasswordSecretName": "sre-t3guac-vm-admin-password-mssql", - "dbAdminPasswordSecretName": "sre-t3guac-db-admin-password-mssql", - "dbAdminUsernameSecretName": "sre-t3guac-db-admin-username-mssql", - "disks": { - "data": { - "sizeGb": "1024", - "type": "Standard_LRS" - }, - "os": { - "sizeGb": "128", - "type": "Standard_LRS" - } - }, - "enableSSIS": true, - "ip": "10.153.3.4", - "port": "1433", - "sku": "sqldev-gen2", - "subnet": "databases", - "type": "MSSQL", - "vmName": "MSSQL-T3GUAC", - "vmSize": "Standard_DS2_v2" - }, - { - "adminPasswordSecretName": "sre-t3guac-vm-admin-password-postgresql", - "dbAdminPasswordSecretName": "sre-t3guac-db-admin-password-postgresql", - "dbAdminUsernameSecretName": "sre-t3guac-db-admin-username-postgresql", - "disks": { - "data": { - "sizeGb": "1024", - "type": "Standard_LRS" - }, - "os": { - "sizeGb": "128", - "type": "Standard_LRS" - } - }, - "ip": "10.153.3.5", - "port": "5432", - "sku": "Ubuntu-latest", - "subnet": "databases", - "type": "PostgreSQL", - "vmName": "PSTGRS-T3GUAC", - "vmSize": "Standard_DS2_v2" - } - ], - "rg": "RG_SHM_BLUE_SRE_T3GUAC_DATABASES" - }, - "diskTypeDefault": "Standard_LRS", - "domain": { - "dn": "DC=t3guac,DC=blue,DC=develop,DC=turingsafehaven,DC=ac,DC=uk", - "fqdn": "t3guac.blue.develop.turingsafehaven.ac.uk", - "netbiosName": "T3GUAC", - "securityGroups": { - "dataAdministrators": { - "description": "SG T3GUAC Data Administrators", - "name": "SG T3GUAC Data Administrators" - }, - "researchUsers": { - "description": "SG T3GUAC Research Users", - "name": "SG T3GUAC Research Users" - }, - "systemAdministrators": { - "description": "SG T3GUAC System Administrators", - "name": "SG T3GUAC System Administrators" - } - } - }, - "firewall": { - "routeTableName": "ROUTE-TABLE-SRE-T3GUAC" - }, - "id": "t3guac", - "keyVault": { - "name": "kv-blue-sre-t3guac", - "rg": "RG_SHM_BLUE_SRE_T3GUAC_SECRETS", - "secretNames": { - "adminUsername": "sre-t3guac-vm-admin-username", - "letsEncryptCertificate": "sre-t3guac-lets-encrypt-certificate", - "npsSecret": "sre-t3guac-other-nps-secret" - } - }, - "location": "uksouth", - "network": { - "vnet": { - "cidr": "10.153.0.0/21", - "name": "VNET_SHM_BLUE_SRE_T3GUAC", - "rg": "RG_SHM_BLUE_SRE_T3GUAC_NETWORKING", - "subnets": { - "compute": { - "cidr": "10.153.4.0/24", - "name": "ComputeSubnet", - "nsg": { - "name": "NSG_SHM_BLUE_SRE_T3GUAC_COMPUTE", - "rules": "sre-nsg-rules-compute.json" - } - }, - "data": { - "cidr": "10.153.2.0/24", - "name": "PrivateDataSubnet" - }, - "databases": { - "cidr": "10.153.3.0/24", - "name": "DatabasesSubnet", - "nsg": { - "name": "NSG_SHM_BLUE_SRE_T3GUAC_DATABASES", - "rules": "sre-nsg-rules-databases.json" - } - }, - "deployment": { - "cidr": "10.153.0.0/24", - "name": "DeploymentSubnet", - "nsg": { - "name": "NSG_SHM_BLUE_SRE_T3GUAC_DEPLOYMENT", - "rules": "sre-nsg-rules-deployment.json" - } - }, - "remoteDesktop": { - "cidr": 
"10.153.1.0/24", - "name": "RemoteDesktopSubnet", - "nsg": { - "name": "NSG_SHM_BLUE_SRE_T3GUAC_GUACAMOLE", - "rules": "sre-nsg-rules-guacamole.json" - } - }, - "webapps": { - "cidr": "10.153.5.0/24", - "name": "WebappsSubnet", - "nsg": { - "name": "NSG_SHM_BLUE_SRE_T3GUAC_WEBAPPS", - "rules": "sre-nsg-rules-webapps.json" - } - } - } - } - }, - "nsgPrefix": "NSG_SHM_BLUE_SRE_T3GUAC", - "remoteDesktop": { - "guacamole": { - "adminPasswordSecretName": "sre-t3guac-vm-admin-password-guacamole", - "databaseAdminPasswordSecretName": "sre-t3guac-db-admin-password-guacamole", - "disks": { - "os": { - "sizeGb": "128", - "type": "Standard_LRS" - } - }, - "fqdn": "guacamole-sre-t3guac.blue.develop.turingsafehaven.ac.uk", - "hostname": "GUACAMOLE-SRE-T3GUAC", - "ip": "10.153.1.4", - "vmName": "GUACAMOLE-SRE-T3GUAC", - "vmSize": "Standard_DS2_v2" - }, - "networkRules": { - "allowedSources": "193.60.220.240", - "copyAllowed": false, - "includeAzurePlatformDnsRule": true, - "outboundInternet": "Deny", - "pasteAllowed": false - }, - "provider": "ApacheGuacamole", - "rg": "RG_SHM_BLUE_SRE_T3GUAC_REMOTE_DESKTOP" - }, - "repositories": { - "cran": { - "url": "http://10.10.3.198:80/repository/cran-proxy" - }, - "network": { - "cidr": "10.10.3.0/24", - "name": "VNET_SHM_BLUE_PACKAGE_REPOSITORIES_TIER_3" - }, - "pypi": { - "host": "10.10.3.198", - "index": "http://10.10.3.198:80/repository/pypi-proxy", - "indexUrl": "http://10.10.3.198:80/repository/pypi-proxy/simple" - } - }, - "rgPrefix": "RG_SHM_BLUE_SRE_T3GUAC", - "shortName": "sre-t3guac", - "srd": { - "adminPasswordSecretName": "sre-t3guac-vm-admin-password-compute", - "disks": { - "os": { - "sizeGb": "default", - "type": "StandardSSD_LRS" - }, - "scratch": { - "sizeGb": "1024", - "type": "StandardSSD_LRS" - } - }, - "rg": "RG_SHM_BLUE_SRE_T3GUAC_COMPUTE", - "vmImage": { - "type": "Ubuntu", - "version": "20.04.2024032600" - }, - "vmSizeDefault": "Standard_D2s_v3" - }, - "storage": { - "accessPolicies": { - "readOnly": { - "permissions": "rl" - }, - "readWrite": { - "permissions": "racwdl" - } - }, - "artifacts": { - "account": { - "accessTier": "Cool", - "allowedIpAddresses": "any", - "name": "bluet3guacartifactsnrzab", - "performance": "Standard_GRS", - "storageKind": "BlobStorage" - }, - "containers": { - "sreArtifactsRDS": "sre-artifacts-rds", - "sreScriptsRDS": "sre-scripts-rds" - }, - "rg": "RG_SHM_BLUE_SRE_T3GUAC_STORAGE" - }, - "bootdiagnostics": { - "accountName": "bluet3guacbootdiagsnrzab", - "rg": "RG_SHM_BLUE_SRE_T3GUAC_STORAGE" - }, - "persistentdata": { - "account": { - "accessTier": "Hot", - "allowedIpAddresses": "193.60.220.240", - "name": "bluet3guacdatanrzabxpjid", - "performance": "Standard_GRS", - "storageKind": "StorageV2" - }, - "containers": { - "backup": { - "accessPolicyName": "readWrite", - "connectionSecretName": "sre-t3guac-data-backup-connection-readwrite", - "mountType": "BlobSMB" - }, - "egress": { - "accessPolicyName": "readWrite", - "connectionSecretName": "sre-t3guac-data-egress-connection-readwrite", - "mountType": "BlobSMB" - }, - "ingress": { - "accessPolicyName": "readOnly", - "connectionSecretName": "sre-t3guac-data-ingress-connection-readonly", - "mountType": "BlobSMB" - } - } - }, - "userdata": { - "account": { - "accessTier": "Hot", - "name": "bluet3guacuserdatanrzabx", - "performance": "Premium_ZRS", - "rg": "RG_SHM_BLUE_SRE_T3GUAC_STORAGE", - "storageKind": "FileStorage" - }, - "containers": { - "home": { - "accessPolicyName": "readWrite", - "mountType": "NFS", - "sizeGb": "1024" - }, - "shared": { - 
"accessPolicyName": "readWrite", - "mountType": "NFS", - "sizeGb": "1024" - } - } - } - }, - "storageTypeDefault": "Standard_GRS", - "subscriptionName": "Data Safe Haven Development", - "tier": "3", - "time": { - "timezone": { - "linux": "Europe/London", - "windows": "GMT Standard Time" - } - }, - "users": { - "serviceAccounts": { - "ldapSearch": { - "name": "T3GUAC LDAP Search Service Account", - "passwordSecretName": "sre-t3guac-other-service-account-password-ldap-search", - "samAccountName": "t3guacldapsearch" - }, - "postgres": { - "name": "T3GUAC Postgres DB Service Account", - "passwordSecretName": "sre-t3guac-db-service-account-password-postgres", - "samAccountName": "t3guacdbpostgres" - } - } - }, - "webapps": { - "codimd": { - "adminPasswordSecretName": "sre-t3guac-vm-admin-password-codimd", - "codimd": { - "dockerVersion": "2.5.3" - }, - "disks": { - "data": { - "sizeGb": "512", - "type": "Standard_LRS" - }, - "os": { - "sizeGb": "32", - "type": "Standard_LRS" - } - }, - "fqdn": "codimd.t3guac.blue.develop.turingsafehaven.ac.uk", - "hostname": "CODIMD", - "ip": "10.153.5.6", - "osVersion": "Ubuntu-latest", - "postgres": { - "dockerVersion": "16-alpine", - "passwordSecretName": "sre-t3guac-other-codimd-password-postgresdb" - }, - "vmName": "CODIMD-SRE-T3GUAC", - "vmSize": "Standard_D2s_v3" - }, - "gitlab": { - "adminPasswordSecretName": "sre-t3guac-vm-admin-password-gitlab", - "disks": { - "data": { - "sizeGb": "512", - "type": "Standard_LRS" - }, - "os": { - "sizeGb": "32", - "type": "Standard_LRS" - } - }, - "fqdn": "gitlab.t3guac.blue.develop.turingsafehaven.ac.uk", - "hostname": "GITLAB", - "ip": "10.153.5.5", - "osVersion": "Ubuntu-latest", - "rootPasswordSecretName": "sre-t3guac-other-gitlab-root-password", - "vmName": "GITLAB-SRE-T3GUAC", - "vmSize": "Standard_D2s_v3" - }, - "rg": "RG_SHM_BLUE_SRE_T3GUAC_WEBAPPS" - } - } -} diff --git a/tests/resources/sre_greent2guac_full_config.json b/tests/resources/sre_greent2guac_full_config.json deleted file mode 100644 index 5e2dc72566..0000000000 --- a/tests/resources/sre_greent2guac_full_config.json +++ /dev/null @@ -1,1469 +0,0 @@ -{ - "shm": { - "azureAdminGroupName": "Safe Haven Test Admins", - "azureAdTenantId": "cb94a6f6-ef7a-42ab-bcad-4f0b887cfd3e", - "dc": { - "adDirectory": "C:\\ActiveDirectory", - "disks": { - "os": { - "sizeGb": "128", - "type": "Standard_LRS" - } - }, - "external_dns_resolver": "168.63.129.16", - "fqdn": "dc1-shm-green.green.develop.turingsafehaven.ac.uk", - "hostname": "DC1-SHM-GREEN", - "hostnameLower": "dc1-shm-green", - "hostnameUpper": "DC1-SHM-GREEN", - "installationDirectory": "C:\\Installation", - "ip": "10.0.0.4", - "rg": "RG_SHM_GREEN_DC", - "safemodePasswordSecretName": "shm-green-vm-safemode-password-dc", - "vmName": "DC1-SHM-GREEN", - "vmSize": "Standard_D2s_v3" - }, - "dcb": { - "fqdn": "dc2-shm-green.green.develop.turingsafehaven.ac.uk", - "hostname": "DC2-SHM-GREEN", - "ip": "10.0.0.5", - "vmName": "DC2-SHM-GREEN" - }, - "diskTypeDefault": "Standard_LRS", - "dns": { - "rg": "RG_SHM_DNS_TEST", - "subscriptionName": "[Prod] Safe Haven Domains" - }, - "dockerAccount": "youraccountname", - "dockerPassword": "dckr_pat_0o0o0o0o0o0o0o0o0o0o0o0o0o0", - "domain": { - "dn": "DC=green,DC=develop,DC=turingsafehaven,DC=ac,DC=uk", - "fqdn": "green.develop.turingsafehaven.ac.uk", - "fqdnLower": "green.develop.turingsafehaven.ac.uk", - "fqdnUpper": "GREEN.DEVELOP.TURINGSAFEHAVEN.AC.UK", - "netbiosName": "GREEN", - "ous": { - "databaseServers": { - "name": "Secure Research Environment Database Servers", - 
"path": "OU=Secure Research Environment Database Servers,DC=green,DC=develop,DC=turingsafehaven,DC=ac,DC=uk" - }, - "identityServers": { - "name": "Safe Haven Identity Servers", - "path": "OU=Safe Haven Identity Servers,DC=green,DC=develop,DC=turingsafehaven,DC=ac,DC=uk" - }, - "linuxServers": { - "name": "Secure Research Environment Linux Servers", - "path": "OU=Secure Research Environment Linux Servers,DC=green,DC=develop,DC=turingsafehaven,DC=ac,DC=uk" - }, - "researchUsers": { - "name": "Safe Haven Research Users", - "path": "OU=Safe Haven Research Users,DC=green,DC=develop,DC=turingsafehaven,DC=ac,DC=uk" - }, - "securityGroups": { - "name": "Safe Haven Security Groups", - "path": "OU=Safe Haven Security Groups,DC=green,DC=develop,DC=turingsafehaven,DC=ac,DC=uk" - }, - "serviceAccounts": { - "name": "Safe Haven Service Accounts", - "path": "OU=Safe Haven Service Accounts,DC=green,DC=develop,DC=turingsafehaven,DC=ac,DC=uk" - } - }, - "securityGroups": { - "computerManagers": { - "description": "SG Safe Haven Computer Management Users", - "name": "SG Safe Haven Computer Management Users" - }, - "serverAdmins": { - "description": "SG Safe Haven Server Administrators", - "name": "SG Safe Haven Server Administrators" - } - } - }, - "firewall": { - "name": "FIREWALL-SHM-GREEN", - "routeTableName": "ROUTE-TABLE-SHM-GREEN" - }, - "id": "green", - "keyVault": { - "name": "kv-shm-green", - "rg": "RG_SHM_GREEN_SECRETS", - "secretNames": { - "aadEmergencyAdminPassword": "shm-green-aad-emergency-admin-password", - "aadEmergencyAdminUsername": "shm-green-aad-emergency-admin-username", - "buildImageAdminPassword": "shm-green-buildimage-admin-password", - "buildImageAdminUsername": "shm-green-buildimage-admin-username", - "domainAdminPassword": "shm-green-domain-admin-password", - "domainAdminUsername": "shm-green-domain-admin-username", - "vmAdminUsername": "shm-green-vm-admin-username", - "vpnCaCertificate": "shm-green-vpn-ca-cert", - "vpnCaCertificatePlain": "shm-green-vpn-ca-cert-plain", - "vpnCaCertPassword": "shm-green-vpn-ca-cert-password", - "vpnClientCertificate": "shm-green-vpn-client-cert", - "vpnClientCertPassword": "shm-green-vpn-client-cert-password" - } - }, - "location": "uksouth", - "monitoring": { - "automationAccount": { - "name": "shm-green-automation" - }, - "loggingWorkspace": { - "name": "shm-green-loganalytics" - }, - "privatelink": { - "name": "shm-green-privatelinkscope" - }, - "rg": "RG_SHM_GREEN_MONITORING", - "updateServers": { - "externalIpAddresses": { - "linux": [ - "72.32.157.246", - "87.238.57.227", - "147.75.85.69", - "217.196.149.55", - "91.189.88.0/21", - "185.125.188.0/22", - "103.21.244.0/22", - "103.22.200.0/22", - "103.31.4.0/22", - "104.16.0.0/13", - "104.24.0.0/14", - "108.162.192.0/18", - "131.0.72.0/22", - "141.101.64.0/18", - "162.158.0.0/15", - "172.64.0.0/13", - "173.245.48.0/20", - "188.114.96.0/20", - "190.93.240.0/20", - "197.234.240.0/22", - "198.41.128.0/17", - "3.10.17.128/25", - "3.101.158.0/23", - "3.11.53.0/24", - "3.128.93.0/24", - "3.134.215.0/24", - "3.231.2.0/25", - "3.234.232.224/27", - "3.236.169.192/26", - "3.236.48.0/23", - "3.35.130.128/25", - "13.113.196.64/26", - "13.113.203.0/24", - "13.124.199.0/24", - "13.210.67.128/26", - "13.224.0.0/14", - "13.228.69.0/24", - "13.233.177.192/26", - "13.249.0.0/16", - "13.32.0.0/15", - "13.35.0.0/16", - "13.48.32.0/24", - "13.54.63.128/26", - "13.59.250.0/26", - "15.158.0.0/16", - "15.188.184.0/24", - "15.207.13.128/25", - "15.207.213.128/25", - "18.154.0.0/15", - "18.160.0.0/15", - 
"18.164.0.0/15", - "18.172.0.0/15", - "18.192.142.0/23", - "18.200.212.0/23", - "18.216.170.128/25", - "18.229.220.192/26", - "18.238.0.0/15", - "18.244.0.0/15", - "18.64.0.0/14", - "18.68.0.0/16", - "34.195.252.0/24", - "34.216.51.0/25", - "34.223.12.224/27", - "34.223.80.192/26", - "34.226.14.0/24", - "35.158.136.0/24", - "35.162.63.192/26", - "35.167.191.128/26", - "36.103.232.0/25", - "36.103.232.128/26", - "44.227.178.0/24", - "44.234.108.128/25", - "44.234.90.252/30", - "52.124.128.0/17", - "52.15.127.128/26", - "52.199.127.192/26", - "52.212.248.0/26", - "52.220.191.0/26", - "52.222.128.0/17", - "52.46.0.0/18", - "52.47.139.0/24", - "52.52.191.128/26", - "52.56.127.0/25", - "52.57.254.0/24", - "52.66.194.128/26", - "52.78.247.128/26", - "52.82.128.0/19", - "52.84.0.0/15", - "54.182.0.0/16", - "54.192.0.0/16", - "54.230.0.0/17", - "54.230.128.0/18", - "54.230.200.0/21", - "54.230.208.0/20", - "54.230.224.0/19", - "54.233.255.128/26", - "54.239.128.0/18", - "54.239.192.0/19", - "54.240.128.0/18", - "58.254.138.0/25", - "58.254.138.128/26", - "64.252.128.0/18", - "64.252.64.0/18", - "65.8.0.0/16", - "65.9.0.0/17", - "65.9.128.0/18", - "70.132.0.0/18", - "71.152.0.0/17", - "99.79.169.0/24", - "99.84.0.0/16", - "99.86.0.0/16", - "108.138.0.0/15", - "108.156.0.0/14", - "116.129.226.0/25", - "116.129.226.128/26", - "118.193.97.128/25", - "118.193.97.64/26", - "119.147.182.0/25", - "119.147.182.128/26", - "120.232.236.0/25", - "120.232.236.128/26", - "120.253.240.192/26", - "120.253.241.160/27", - "120.253.245.128/26", - "120.253.245.192/27", - "120.52.12.64/26", - "120.52.153.192/26", - "120.52.22.96/27", - "120.52.39.128/27", - "130.176.0.0/17", - "130.176.128.0/18", - "130.176.192.0/19", - "130.176.224.0/20", - "143.204.0.0/16", - "144.220.0.0/16", - "180.163.57.0/25", - "180.163.57.128/26", - "204.246.164.0/22", - "204.246.168.0/22", - "204.246.172.0/24", - "204.246.173.0/24", - "204.246.174.0/23", - "204.246.176.0/20", - "205.251.200.0/21", - "205.251.208.0/20", - "205.251.249.0/24", - "205.251.250.0/23", - "205.251.252.0/23", - "205.251.254.0/24", - "216.137.32.0/19", - "223.71.11.0/27", - "223.71.71.128/25", - "223.71.71.96/27", - "104.131.190.124", - "152.199.20.126", - "4.128.0.0/12", - "4.144.0.0/12", - "4.160.0.0/12", - "4.176.0.0/12", - "4.192.0.0/12", - "4.208.0.0/12", - "4.224.0.0/12", - "4.240.0.0/12", - "13.64.0.0/11", - "13.96.0.0/13", - "13.104.0.0/14", - "20.0.0.0/11", - "20.33.0.0/16", - "20.34.0.0/15", - "20.36.0.0/14", - "20.40.0.0/13", - "20.48.0.0/12", - "20.64.0.0/10", - "20.128.0.0/16", - "20.130.0.0/16", - "20.135.0.0/16", - "20.136.0.0/16", - "20.140.0.0/15", - "20.143.0.0/16", - "20.144.0.0/14", - "20.150.0.0/15", - "20.152.0.0/16", - "20.153.0.0/16", - "20.157.0.0/16", - "20.158.0.0/15", - "20.160.0.0/12", - "20.176.0.0/14", - "20.180.0.0/14", - "20.184.0.0/13", - "20.192.0.0/10", - "23.96.0.0/13", - "40.64.0.0/10", - "40.162.0.0/16", - "42.159.0.0/16", - "51.4.0.0/15", - "51.8.0.0/16", - "51.10.0.0/15", - "51.12.0.0/15", - "51.18.0.0/16", - "51.51.0.0/16", - "51.53.0.0/16", - "51.103.0.0/16", - "51.104.0.0/15", - "51.107.0.0/16", - "51.116.0.0/16", - "51.120.0.0/16", - "51.124.0.0/16", - "51.132.0.0/16", - "51.136.0.0/15", - "51.138.0.0/16", - "51.140.0.0/14", - "51.144.0.0/15", - "52.96.0.0/12", - "52.112.0.0/14", - "52.120.0.0/14", - "52.125.0.0/16", - "52.126.0.0/15", - "52.130.0.0/15", - "52.132.0.0/14", - "52.136.0.0/13", - "52.145.0.0/16", - "52.146.0.0/15", - "52.148.0.0/14", - "52.152.0.0/13", - "52.160.0.0/11", - "52.224.0.0/11", - "64.4.0.0/18", - 
"65.52.0.0/14", - "66.119.144.0/20", - "68.18.0.0/15", - "68.154.0.0/15", - "68.210.0.0/15", - "68.218.0.0/15", - "68.220.0.0/15", - "70.37.0.0/17", - "70.37.128.0/18", - "70.152.0.0/15", - "70.156.0.0/15", - "72.144.0.0/14", - "72.152.0.0/14", - "74.160.0.0/14", - "74.176.0.0/14", - "74.224.0.0/14", - "74.234.0.0/15", - "74.240.0.0/14", - "74.248.0.0/15", - "91.190.216.0/21", - "94.245.64.0/18", - "98.64.0.0/14", - "98.70.0.0/15", - "102.37.0.0/16", - "102.133.0.0/16", - "103.9.8.0/22", - "103.25.156.0/24", - "103.25.157.0/24", - "103.25.158.0/23", - "103.36.96.0/22", - "103.255.140.0/22", - "104.40.0.0/13", - "104.146.0.0/15", - "104.208.0.0/13", - "108.140.0.0/14", - "111.221.16.0/20", - "111.221.64.0/18", - "128.94.0.0/16", - "129.75.0.0/16", - "131.107.0.0/16", - "131.253.1.0/24", - "131.253.3.0/24", - "131.253.5.0/24", - "131.253.6.0/24", - "131.253.8.0/24", - "131.253.12.0/22", - "131.253.16.0/23", - "131.253.18.0/24", - "131.253.21.0/24", - "131.253.22.0/23", - "131.253.24.0/21", - "131.253.32.0/20", - "131.253.61.0/24", - "131.253.62.0/23", - "131.253.64.0/18", - "131.253.128.0/17", - "132.164.0.0/16", - "132.245.0.0/16", - "134.170.0.0/16", - "134.177.0.0/16", - "135.130.0.0/16", - "135.149.0.0/16", - "137.116.0.0/15", - "137.135.0.0/16", - "138.91.0.0/16", - "138.105.0.0/16", - "138.196.0.0/16", - "138.239.0.0/16", - "139.217.0.0/16", - "139.219.0.0/16", - "141.251.0.0/16", - "143.64.0.0/16", - "146.147.0.0/16", - "147.145.0.0/16", - "147.243.0.0/16", - "148.7.0.0/16", - "150.171.0.0/16", - "150.242.48.0/22", - "155.62.0.0/16", - "157.31.0.0/16", - "157.54.0.0/15", - "157.56.0.0/14", - "157.60.0.0/16", - "158.23.0.0/16", - "158.158.0.0/16", - "159.27.0.0/16", - "159.128.0.0/16", - "163.228.0.0/16", - "167.105.0.0/16", - "167.220.0.0/16", - "168.61.0.0/16", - "168.62.0.0/15", - "169.138.0.0/16", - "170.165.0.0/16", - "172.160.0.0/11", - "172.200.0.0/13", - "172.208.0.0/13", - "191.232.0.0/13", - "192.32.0.0/16", - "192.48.225.0/24", - "192.84.159.0/24", - "192.84.160.0/23", - "192.197.157.0/24", - "192.237.67.0/24", - "193.149.64.0/19", - "193.221.113.0/24", - "194.69.96.0/19", - "194.110.197.0/24", - "195.134.224.0/19", - "198.105.232.0/22", - "198.137.97.0/24", - "198.180.95.0/24", - "198.180.96.0/23", - "198.200.130.0/24", - "198.206.164.0/24", - "199.30.16.0/20", - "199.60.28.0/24", - "199.74.210.0/24", - "199.103.90.0/23", - "199.103.122.0/24", - "199.242.32.0/20", - "199.242.48.0/21", - "202.89.224.0/20", - "204.13.120.0/21", - "204.14.180.0/22", - "204.79.135.0/24", - "204.79.179.0/24", - "204.79.181.0/24", - "204.79.188.0/24", - "204.79.195.0/24", - "204.79.196.0/23", - "204.79.252.0/24", - "204.152.18.0/23", - "204.152.140.0/23", - "204.231.192.0/24", - "204.231.194.0/23", - "204.231.197.0/24", - "204.231.198.0/23", - "204.231.200.0/21", - "204.231.208.0/20", - "204.231.236.0/24", - "205.174.224.0/20", - "206.138.168.0/21", - "206.191.224.0/19", - "207.46.0.0/16", - "207.68.128.0/18", - "208.68.136.0/21", - "208.76.44.0/22", - "208.84.0.0/21", - "209.240.192.0/19", - "213.199.128.0/18", - "216.32.180.0/22", - "216.220.208.0/20" - ], - "windows": [ - "4.128.0.0/12", - "4.144.0.0/12", - "4.160.0.0/12", - "4.176.0.0/12", - "4.192.0.0/12", - "4.208.0.0/12", - "4.224.0.0/12", - "4.240.0.0/12", - "13.64.0.0/11", - "13.96.0.0/13", - "13.104.0.0/14", - "20.0.0.0/11", - "20.33.0.0/16", - "20.34.0.0/15", - "20.36.0.0/14", - "20.40.0.0/13", - "20.48.0.0/12", - "20.64.0.0/10", - "20.128.0.0/16", - "20.130.0.0/16", - "20.135.0.0/16", - "20.136.0.0/16", - "20.140.0.0/15", - 
"20.143.0.0/16", - "20.144.0.0/14", - "20.150.0.0/15", - "20.152.0.0/16", - "20.153.0.0/16", - "20.157.0.0/16", - "20.158.0.0/15", - "20.160.0.0/12", - "20.176.0.0/14", - "20.180.0.0/14", - "20.184.0.0/13", - "20.192.0.0/10", - "23.96.0.0/13", - "40.64.0.0/10", - "40.162.0.0/16", - "42.159.0.0/16", - "51.4.0.0/15", - "51.8.0.0/16", - "51.10.0.0/15", - "51.12.0.0/15", - "51.18.0.0/16", - "51.51.0.0/16", - "51.53.0.0/16", - "51.103.0.0/16", - "51.104.0.0/15", - "51.107.0.0/16", - "51.116.0.0/16", - "51.120.0.0/16", - "51.124.0.0/16", - "51.132.0.0/16", - "51.136.0.0/15", - "51.138.0.0/16", - "51.140.0.0/14", - "51.144.0.0/15", - "52.96.0.0/12", - "52.112.0.0/14", - "52.120.0.0/14", - "52.125.0.0/16", - "52.126.0.0/15", - "52.130.0.0/15", - "52.132.0.0/14", - "52.136.0.0/13", - "52.145.0.0/16", - "52.146.0.0/15", - "52.148.0.0/14", - "52.152.0.0/13", - "52.160.0.0/11", - "52.224.0.0/11", - "64.4.0.0/18", - "65.52.0.0/14", - "66.119.144.0/20", - "68.18.0.0/15", - "68.154.0.0/15", - "68.210.0.0/15", - "68.218.0.0/15", - "68.220.0.0/15", - "70.37.0.0/17", - "70.37.128.0/18", - "70.152.0.0/15", - "70.156.0.0/15", - "72.144.0.0/14", - "72.152.0.0/14", - "74.160.0.0/14", - "74.176.0.0/14", - "74.224.0.0/14", - "74.234.0.0/15", - "74.240.0.0/14", - "74.248.0.0/15", - "91.190.216.0/21", - "94.245.64.0/18", - "98.64.0.0/14", - "98.70.0.0/15", - "102.37.0.0/16", - "102.133.0.0/16", - "103.9.8.0/22", - "103.25.156.0/24", - "103.25.157.0/24", - "103.25.158.0/23", - "103.36.96.0/22", - "103.255.140.0/22", - "104.40.0.0/13", - "104.146.0.0/15", - "104.208.0.0/13", - "108.140.0.0/14", - "111.221.16.0/20", - "111.221.64.0/18", - "128.94.0.0/16", - "129.75.0.0/16", - "131.107.0.0/16", - "131.253.1.0/24", - "131.253.3.0/24", - "131.253.5.0/24", - "131.253.6.0/24", - "131.253.8.0/24", - "131.253.12.0/22", - "131.253.16.0/23", - "131.253.18.0/24", - "131.253.21.0/24", - "131.253.22.0/23", - "131.253.24.0/21", - "131.253.32.0/20", - "131.253.61.0/24", - "131.253.62.0/23", - "131.253.64.0/18", - "131.253.128.0/17", - "132.164.0.0/16", - "132.245.0.0/16", - "134.170.0.0/16", - "134.177.0.0/16", - "135.130.0.0/16", - "135.149.0.0/16", - "137.116.0.0/15", - "137.135.0.0/16", - "138.91.0.0/16", - "138.105.0.0/16", - "138.196.0.0/16", - "138.239.0.0/16", - "139.217.0.0/16", - "139.219.0.0/16", - "141.251.0.0/16", - "143.64.0.0/16", - "146.147.0.0/16", - "147.145.0.0/16", - "147.243.0.0/16", - "148.7.0.0/16", - "150.171.0.0/16", - "150.242.48.0/22", - "155.62.0.0/16", - "157.31.0.0/16", - "157.54.0.0/15", - "157.56.0.0/14", - "157.60.0.0/16", - "158.23.0.0/16", - "158.158.0.0/16", - "159.27.0.0/16", - "159.128.0.0/16", - "163.228.0.0/16", - "167.105.0.0/16", - "167.220.0.0/16", - "168.61.0.0/16", - "168.62.0.0/15", - "169.138.0.0/16", - "170.165.0.0/16", - "172.160.0.0/11", - "172.200.0.0/13", - "172.208.0.0/13", - "191.232.0.0/13", - "192.32.0.0/16", - "192.48.225.0/24", - "192.84.159.0/24", - "192.84.160.0/23", - "192.197.157.0/24", - "192.237.67.0/24", - "193.149.64.0/19", - "193.221.113.0/24", - "194.69.96.0/19", - "194.110.197.0/24", - "195.134.224.0/19", - "198.105.232.0/22", - "198.137.97.0/24", - "198.180.95.0/24", - "198.180.96.0/23", - "198.200.130.0/24", - "198.206.164.0/24", - "199.30.16.0/20", - "199.60.28.0/24", - "199.74.210.0/24", - "199.103.90.0/23", - "199.103.122.0/24", - "199.242.32.0/20", - "199.242.48.0/21", - "202.89.224.0/20", - "204.13.120.0/21", - "204.14.180.0/22", - "204.79.135.0/24", - "204.79.179.0/24", - "204.79.181.0/24", - "204.79.188.0/24", - "204.79.195.0/24", - "204.79.196.0/23", - 
"204.79.252.0/24", - "204.152.18.0/23", - "204.152.140.0/23", - "204.231.192.0/24", - "204.231.194.0/23", - "204.231.197.0/24", - "204.231.198.0/23", - "204.231.200.0/21", - "204.231.208.0/20", - "204.231.236.0/24", - "205.174.224.0/20", - "206.138.168.0/21", - "206.191.224.0/19", - "207.46.0.0/16", - "207.68.128.0/18", - "208.68.136.0/21", - "208.76.44.0/22", - "208.84.0.0/21", - "209.240.192.0/19", - "213.199.128.0/18", - "216.32.180.0/22", - "216.220.208.0/20" - ] - }, - "linux": { - "adminPasswordSecretName": "shm-green-vm-admin-password-linux-update-server", - "disks": { - "os": { - "sizeGb": "64", - "type": "Standard_LRS" - } - }, - "hostname": "LINUX-UPDATES-SHM-GREEN", - "ip": "10.0.3.4", - "vmName": "LINUX-UPDATES-SHM-GREEN", - "vmSize": "Standard_B2ms" - }, - "schedule": { - "daily_definition_updates": { - "hour": "01", - "minute": "01" - }, - "weekly_system_updates": { - "day": "Tuesday", - "hour": "02", - "minute": "02" - } - } - } - }, - "name": "Turing Data Safe Haven (Green)", - "network": { - "vnet": { - "cidr": "10.0.0.0/21", - "name": "VNET_SHM_GREEN", - "rg": "RG_SHM_GREEN_NETWORKING", - "subnets": { - "firewall": { - "cidr": "10.0.2.0/24", - "name": "AzureFirewallSubnet" - }, - "gateway": { - "cidr": "10.0.7.0/24", - "name": "GatewaySubnet" - }, - "identity": { - "cidr": "10.0.0.0/24", - "name": "IdentitySubnet", - "nsg": { - "name": "NSG_SHM_GREEN_IDENTITY", - "rules": "shm-nsg-rules-identity.json" - } - }, - "monitoring": { - "cidr": "10.0.1.0/24", - "name": "MonitoringSubnet", - "nsg": { - "name": "NSG_SHM_GREEN_MONITORING", - "rules": "shm-nsg-rules-monitoring.json" - } - }, - "updateServers": { - "cidr": "10.0.3.0/24", - "name": "UpdateServersSubnet", - "nsg": { - "name": "NSG_SHM_GREEN_UPDATE_SERVERS", - "rules": "shm-nsg-rules-update-servers.json" - } - } - } - }, - "vnetRepositoriesTier2": { - "cidr": "10.10.2.0/24", - "name": "VNET_SHM_GREEN_PACKAGE_REPOSITORIES_TIER_2", - "rg": "RG_SHM_GREEN_NETWORKING", - "subnets": { - "deployment": { - "cidr": "10.10.2.0/26", - "name": "RepositoryDeploymentSubnet", - "nsg": { - "name": "NSG_SHM_GREEN_REPOSITORY_DEPLOYMENT_TIER_2", - "rules": "shm-nsg-rules-repository-deployment-tier2.json" - } - }, - "mirrorsExternal": { - "cidr": "10.10.2.64/26", - "name": "RepositoryMirrorsExternalTier2Subnet", - "nsg": { - "name": "NSG_SHM_GREEN_REPOSITORY_MIRRORS_EXTERNAL_TIER2", - "rules": "shm-nsg-rules-repository-mirrors-external-tier2.json" - } - }, - "mirrorsInternal": { - "cidr": "10.10.2.128/26", - "name": "RepositoryMirrorsInternalTier2Subnet", - "nsg": { - "name": "NSG_SHM_GREEN_REPOSITORY_MIRRORS_INTERNAL_TIER2", - "rules": "shm-nsg-rules-repository-mirrors-internal-tier2.json" - } - }, - "proxies": { - "cidr": "10.10.2.192/26", - "name": "RepositoryProxiesTier2Subnet", - "nsg": { - "name": "NSG_SHM_GREEN_REPOSITORY_PROXIES_TIER_2", - "rules": "shm-nsg-rules-repository-proxies-tier2.json" - } - } - } - }, - "vnetRepositoriesTier3": { - "cidr": "10.10.3.0/24", - "name": "VNET_SHM_GREEN_PACKAGE_REPOSITORIES_TIER_3", - "rg": "RG_SHM_GREEN_NETWORKING", - "subnets": { - "deployment": { - "cidr": "10.10.3.0/26", - "name": "RepositoryDeploymentSubnet", - "nsg": { - "name": "NSG_SHM_GREEN_REPOSITORY_DEPLOYMENT_TIER_3", - "rules": "shm-nsg-rules-repository-deployment-tier3.json" - } - }, - "mirrorsExternal": { - "cidr": "10.10.3.64/26", - "name": "RepositoryMirrorsExternalTier3Subnet", - "nsg": { - "name": "NSG_SHM_GREEN_REPOSITORY_MIRRORS_EXTERNAL_TIER3", - "rules": "shm-nsg-rules-repository-mirrors-external-tier3.json" - } - }, - 
"mirrorsInternal": { - "cidr": "10.10.3.128/26", - "name": "RepositoryMirrorsInternalTier3Subnet", - "nsg": { - "name": "NSG_SHM_GREEN_REPOSITORY_MIRRORS_INTERNAL_TIER3", - "rules": "shm-nsg-rules-repository-mirrors-internal-tier3.json" - } - }, - "proxies": { - "cidr": "10.10.3.192/26", - "name": "RepositoryProxiesTier3Subnet", - "nsg": { - "name": "NSG_SHM_GREEN_REPOSITORY_PROXIES_TIER_3", - "rules": "shm-nsg-rules-repository-proxies-tier3.json" - } - } - } - }, - "vpn": { - "cidr": "172.16.201.0/24" - } - }, - "nps": { - "adminPasswordSecretName": "shm-green-vm-admin-password-nps", - "disks": { - "os": { - "sizeGb": "128", - "type": "Standard_LRS" - } - }, - "hostname": "NPS-SHM-GREEN", - "installationDirectory": "C:\\Installation", - "ip": "10.0.0.6", - "rg": "RG_SHM_GREEN_NPS", - "vmName": "NPS-SHM-GREEN", - "vmSize": "Standard_D2s_v3" - }, - "nsgPrefix": "NSG_SHM_GREEN", - "organisation": { - "contactEmail": "safehavendevs@turing.ac.uk", - "countryCode": "GB", - "name": "The Alan Turing Institute", - "stateCountyRegion": "London", - "townCity": "London" - }, - "repositories": { - "rg": "RG_SHM_GREEN_PACKAGE_REPOSITORIES", - "tier2": { - "proxies": { - "many": { - "adminPasswordSecretName": "shm-green-vm-admin-password-many-repository-proxy-tier-2", - "applicationAdminPasswordSecretName": "shm-green-application-admin-password-many-repository-proxy-tier-2", - "disks": { - "os": { - "sizeGb": 32, - "type": "Standard_LRS" - } - }, - "ipAddress": "10.10.2.198", - "vmName": "SHM-GREEN-MANY-REPOSITORY-PROXY-TIER-2", - "vmSize": "Standard_B2ms" - } - } - }, - "tier3": { - "mirrorsExternal": { - "cran": { - "adminPasswordSecretName": "shm-green-vm-admin-password-cran-repository-mirror-external-tier-3", - "disks": { - "data": { - "sizeGb": 32, - "type": "Standard_LRS" - }, - "os": { - "sizeGb": 32, - "type": "Standard_LRS" - } - }, - "ipAddress": "10.10.3.68", - "vmName": "SHM-GREEN-CRAN-REPOSITORY-MIRROR-EXTERNAL-TIER-3", - "vmSize": "Standard_B2ms" - }, - "pypi": { - "adminPasswordSecretName": "shm-green-vm-admin-password-pypi-repository-mirror-external-tier-3", - "disks": { - "data": { - "sizeGb": 1024, - "type": "Standard_LRS" - }, - "os": { - "sizeGb": 32, - "type": "Standard_LRS" - } - }, - "ipAddress": "10.10.3.69", - "vmName": "SHM-GREEN-PYPI-REPOSITORY-MIRROR-EXTERNAL-TIER-3", - "vmSize": "Standard_B2ms" - } - }, - "mirrorsInternal": { - "cran": { - "adminPasswordSecretName": "shm-green-vm-admin-password-cran-repository-mirror-internal-tier-3", - "disks": { - "data": { - "sizeGb": 32, - "type": "Standard_LRS" - }, - "os": { - "sizeGb": 32, - "type": "Standard_LRS" - } - }, - "ipAddress": "10.10.3.132", - "vmName": "SHM-GREEN-CRAN-REPOSITORY-MIRROR-INTERNAL-TIER-3", - "vmSize": "Standard_B2ms" - }, - "pypi": { - "adminPasswordSecretName": "shm-green-vm-admin-password-pypi-repository-mirror-internal-tier-3", - "disks": { - "data": { - "sizeGb": 1024, - "type": "Standard_LRS" - }, - "os": { - "sizeGb": 32, - "type": "Standard_LRS" - } - }, - "ipAddress": "10.10.3.133", - "vmName": "SHM-GREEN-PYPI-REPOSITORY-MIRROR-INTERNAL-TIER-3", - "vmSize": "Standard_B2ms" - } - } - } - }, - "rgPrefix": "RG_SHM_GREEN", - "srdImage": { - "bootdiagnostics": { - "accountName": "vmimagesbootdiagcmtvwnes", - "rg": "RG_VMIMAGES_BOOT_DIAGNOSTICS" - }, - "build": { - "nsg": { - "allowedIpAddresses": [ - "193.60.220.240", - "193.60.220.253" - ], - "name": "NSG_VMIMAGES_BUILD_CANDIDATES", - "rules": "vmimages-nsg-rules-build-candidates.json" - }, - "rg": "RG_VMIMAGES_BUILD_CANDIDATES", - "subnet": { - "cidr": 
"10.48.0.0/24", - "name": "BuildCandidatesSubnet" - }, - "vm": { - "diskSizeGb": 128, - "diskType": "Standard_LRS", - "size": "Standard_F8s_v2" - }, - "vnet": { - "cidr": "10.48.0.0/16", - "name": "VNET_VMIMAGES" - } - }, - "gallery": { - "name": "DATA_SAFE_HAVEN_SHARED_IMAGES", - "rg": "RG_VMIMAGES_GALLERY" - }, - "images": { - "rg": "RG_VMIMAGES_STORAGE" - }, - "keyVault": { - "name": "kv-shm-green-images", - "rg": "RG_VMIMAGES_SECRETS" - }, - "location": "uksouth", - "network": { - "rg": "RG_VMIMAGES_NETWORKING" - }, - "subscription": "[Prod] Safe Haven VM Images" - }, - "storage": { - "artifacts": { - "accountName": "shmgreenartifactsnrzabxp", - "containers": { - "shmArtifactsDC": "shm-artifacts-dc", - "shmArtifactsNPS": "shm-artifacts-nps", - "shmDesiredState": "shm-desired-state" - }, - "rg": "RG_SHM_GREEN_STORAGE" - }, - "bootdiagnostics": { - "accountName": "shmgreenbootdiagsnrzabxp", - "rg": "RG_SHM_GREEN_STORAGE" - }, - "persistentdata": { - "rg": "RG_SHM_GREEN_PERSISTENT_DATA" - } - }, - "storageTypeDefault": "Standard_GRS", - "subscriptionName": "Data Safe Haven Development", - "time": { - "ntp": { - "serverAddresses": [ - "216.239.35.0", - "216.239.35.4", - "216.239.35.8", - "216.239.35.12" - ], - "serverFqdns": [ - "time.google.com", - "time1.google.com", - "time2.google.com", - "time3.google.com", - "time4.google.com" - ] - }, - "timezone": { - "linux": "Europe/London", - "windows": "GMT Standard Time" - } - }, - "users": { - "computerManagers": { - "databaseServers": { - "name": "GREEN Database Servers Manager", - "passwordSecretName": "shm-green-computer-manager-password-database-servers", - "samAccountName": "greendatabasesrvrs" - }, - "identityServers": { - "name": "GREEN Identity Servers Manager", - "passwordSecretName": "shm-green-computer-manager-password-identity-servers", - "samAccountName": "greenidentitysrvrs" - }, - "linuxServers": { - "name": "GREEN Linux Servers Manager", - "passwordSecretName": "shm-green-computer-manager-password-linux-servers", - "samAccountName": "greenlinuxsrvrs" - } - }, - "serviceAccounts": { - "aadLocalSync": { - "name": "GREEN Local AD Sync Administrator", - "passwordSecretName": "shm-green-aad-localsync-password", - "samAccountName": "greenlocaladsync", - "usernameSecretName": "shm-green-aad-localsync-username" - } - } - }, - "vmImagesRgPrefix": "RG_VMIMAGES" - }, - "sre": { - "azureAdminGroupName": "Safe Haven Test Admins", - "backup": { - "blob": { - "policy_name": "blobbackuppolicy" - }, - "disk": { - "policy_name": "diskbackuppolicy" - }, - "rg": "RG_SHM_GREEN_SRE_T2GUAC_BACKUP", - "vault": { - "name": "bv-green-sre-t2guac" - } - }, - "databases": { - "enabled": true, - "instances": [ - { - "adminPasswordSecretName": "sre-t2guac-vm-admin-password-mssql", - "dbAdminPasswordSecretName": "sre-t2guac-db-admin-password-mssql", - "dbAdminUsernameSecretName": "sre-t2guac-db-admin-username-mssql", - "disks": { - "data": { - "sizeGb": "1024", - "type": "Standard_LRS" - }, - "os": { - "sizeGb": "128", - "type": "Standard_LRS" - } - }, - "enableSSIS": true, - "ip": "10.152.3.4", - "port": "1433", - "sku": "sqldev-gen2", - "subnet": "databases", - "type": "MSSQL", - "vmName": "MSSQL-T2GUAC", - "vmSize": "Standard_DS2_v2" - }, - { - "adminPasswordSecretName": "sre-t2guac-vm-admin-password-postgresql", - "dbAdminPasswordSecretName": "sre-t2guac-db-admin-password-postgresql", - "dbAdminUsernameSecretName": "sre-t2guac-db-admin-username-postgresql", - "disks": { - "data": { - "sizeGb": "1024", - "type": "Standard_LRS" - }, - "os": { - "sizeGb": 
"128", - "type": "Standard_LRS" - } - }, - "ip": "10.152.3.5", - "port": "5432", - "sku": "Ubuntu-latest", - "subnet": "databases", - "type": "PostgreSQL", - "vmName": "PSTGRS-T2GUAC", - "vmSize": "Standard_DS2_v2" - } - ], - "rg": "RG_SHM_GREEN_SRE_T2GUAC_DATABASES" - }, - "diskTypeDefault": "Standard_LRS", - "domain": { - "dn": "DC=t2guac,DC=green,DC=develop,DC=turingsafehaven,DC=ac,DC=uk", - "fqdn": "t2guac.green.develop.turingsafehaven.ac.uk", - "netbiosName": "T2GUAC", - "securityGroups": { - "dataAdministrators": { - "description": "SG T2GUAC Data Administrators", - "name": "SG T2GUAC Data Administrators" - }, - "researchUsers": { - "description": "SG T2GUAC Research Users", - "name": "SG T2GUAC Research Users" - }, - "systemAdministrators": { - "description": "SG T2GUAC System Administrators", - "name": "SG T2GUAC System Administrators" - } - } - }, - "firewall": { - "routeTableName": "ROUTE-TABLE-SRE-T2GUAC" - }, - "id": "t2guac", - "keyVault": { - "name": "kv-green-sre-t2guac", - "rg": "RG_SHM_GREEN_SRE_T2GUAC_SECRETS", - "secretNames": { - "adminUsername": "sre-t2guac-vm-admin-username", - "letsEncryptCertificate": "sre-t2guac-lets-encrypt-certificate", - "npsSecret": "sre-t2guac-other-nps-secret" - } - }, - "location": "uksouth", - "network": { - "vnet": { - "cidr": "10.152.0.0/21", - "name": "VNET_SHM_GREEN_SRE_T2GUAC", - "rg": "RG_SHM_GREEN_SRE_T2GUAC_NETWORKING", - "subnets": { - "compute": { - "cidr": "10.152.4.0/24", - "name": "ComputeSubnet", - "nsg": { - "name": "NSG_SHM_GREEN_SRE_T2GUAC_COMPUTE", - "rules": "sre-nsg-rules-compute.json" - } - }, - "data": { - "cidr": "10.152.2.0/24", - "name": "PrivateDataSubnet" - }, - "databases": { - "cidr": "10.152.3.0/24", - "name": "DatabasesSubnet", - "nsg": { - "name": "NSG_SHM_GREEN_SRE_T2GUAC_DATABASES", - "rules": "sre-nsg-rules-databases.json" - } - }, - "deployment": { - "cidr": "10.152.0.0/24", - "name": "DeploymentSubnet", - "nsg": { - "name": "NSG_SHM_GREEN_SRE_T2GUAC_DEPLOYMENT", - "rules": "sre-nsg-rules-deployment.json" - } - }, - "remoteDesktop": { - "cidr": "10.152.1.0/24", - "name": "RemoteDesktopSubnet", - "nsg": { - "name": "NSG_SHM_GREEN_SRE_T2GUAC_GUACAMOLE", - "rules": "sre-nsg-rules-guacamole.json" - } - }, - "webapps": { - "cidr": "10.152.5.0/24", - "name": "WebappsSubnet", - "nsg": { - "name": "NSG_SHM_GREEN_SRE_T2GUAC_WEBAPPS", - "rules": "sre-nsg-rules-webapps.json" - } - } - } - } - }, - "nsgPrefix": "NSG_SHM_GREEN_SRE_T2GUAC", - "remoteDesktop": { - "guacamole": { - "adminPasswordSecretName": "sre-t2guac-vm-admin-password-guacamole", - "databaseAdminPasswordSecretName": "sre-t2guac-db-admin-password-guacamole", - "disks": { - "os": { - "sizeGb": "128", - "type": "Standard_LRS" - } - }, - "fqdn": "guacamole-sre-t2guac.green.develop.turingsafehaven.ac.uk", - "hostname": "GUACAMOLE-SRE-T2GUAC", - "ip": "10.152.1.4", - "vmName": "GUACAMOLE-SRE-T2GUAC", - "vmSize": "Standard_DS2_v2" - }, - "networkRules": { - "allowedSources": "193.60.220.253", - "copyAllowed": false, - "includeAzurePlatformDnsRule": true, - "outboundInternet": "Deny", - "pasteAllowed": false - }, - "provider": "ApacheGuacamole", - "rg": "RG_SHM_GREEN_SRE_T2GUAC_REMOTE_DESKTOP" - }, - "repositories": { - "cran": { - "url": "http://10.10.2.198:80/repository/cran-proxy" - }, - "network": { - "cidr": "10.10.2.0/24", - "name": "VNET_SHM_GREEN_PACKAGE_REPOSITORIES_TIER_2" - }, - "pypi": { - "host": "10.10.2.198", - "index": "http://10.10.2.198:80/repository/pypi-proxy", - "indexUrl": "http://10.10.2.198:80/repository/pypi-proxy/simple" - } - }, - 
"rgPrefix": "RG_SHM_GREEN_SRE_T2GUAC", - "shortName": "sre-t2guac", - "srd": { - "adminPasswordSecretName": "sre-t2guac-vm-admin-password-compute", - "disks": { - "os": { - "sizeGb": "default", - "type": "StandardSSD_LRS" - }, - "scratch": { - "sizeGb": "1024", - "type": "StandardSSD_LRS" - } - }, - "rg": "RG_SHM_GREEN_SRE_T2GUAC_COMPUTE", - "vmImage": { - "type": "Ubuntu", - "version": "20.04.2024032600" - }, - "vmSizeDefault": "Standard_D2s_v3" - }, - "storage": { - "accessPolicies": { - "readOnly": { - "permissions": "rl" - }, - "readWrite": { - "permissions": "racwdl" - } - }, - "artifacts": { - "account": { - "accessTier": "Cool", - "allowedIpAddresses": "any", - "name": "greent2guacartifactsnrza", - "performance": "Standard_GRS", - "storageKind": "BlobStorage" - }, - "containers": { - "sreArtifactsRDS": "sre-artifacts-rds", - "sreScriptsRDS": "sre-scripts-rds" - }, - "rg": "RG_SHM_GREEN_SRE_T2GUAC_STORAGE" - }, - "bootdiagnostics": { - "accountName": "greent2guacbootdiagsnrza", - "rg": "RG_SHM_GREEN_SRE_T2GUAC_STORAGE" - }, - "persistentdata": { - "account": { - "accessTier": "Hot", - "allowedIpAddresses": "193.60.220.253", - "name": "greent2guacdatanrzabxpji", - "performance": "Standard_GRS", - "storageKind": "StorageV2" - }, - "containers": { - "backup": { - "accessPolicyName": "readWrite", - "connectionSecretName": "sre-t2guac-data-backup-connection-readwrite", - "mountType": "BlobSMB" - }, - "egress": { - "accessPolicyName": "readWrite", - "connectionSecretName": "sre-t2guac-data-egress-connection-readwrite", - "mountType": "BlobSMB" - }, - "ingress": { - "accessPolicyName": "readOnly", - "connectionSecretName": "sre-t2guac-data-ingress-connection-readonly", - "mountType": "BlobSMB" - } - } - }, - "userdata": { - "account": { - "accessTier": "Hot", - "name": "greent2guacuserdatanrzab", - "performance": "Premium_ZRS", - "rg": "RG_SHM_GREEN_SRE_T2GUAC_STORAGE", - "storageKind": "FileStorage" - }, - "containers": { - "home": { - "accessPolicyName": "readWrite", - "mountType": "NFS", - "sizeGb": "1024" - }, - "shared": { - "accessPolicyName": "readWrite", - "mountType": "NFS", - "sizeGb": "1024" - } - } - } - }, - "storageTypeDefault": "Standard_GRS", - "subscriptionName": "Data Safe Haven Development", - "tier": "2", - "time": { - "timezone": { - "linux": "Europe/London", - "windows": "GMT Standard Time" - } - }, - "users": { - "serviceAccounts": { - "ldapSearch": { - "name": "T2GUAC LDAP Search Service Account", - "passwordSecretName": "sre-t2guac-other-service-account-password-ldap-search", - "samAccountName": "t2guacldapsearch" - }, - "postgres": { - "name": "T2GUAC Postgres DB Service Account", - "passwordSecretName": "sre-t2guac-db-service-account-password-postgres", - "samAccountName": "t2guacdbpostgres" - } - } - }, - "webapps": { - "codimd": { - "adminPasswordSecretName": "sre-t2guac-vm-admin-password-codimd", - "codimd": { - "dockerVersion": "2.5.3" - }, - "disks": { - "data": { - "sizeGb": "512", - "type": "Standard_LRS" - }, - "os": { - "sizeGb": "32", - "type": "Standard_LRS" - } - }, - "fqdn": "codimd.t2guac.green.develop.turingsafehaven.ac.uk", - "hostname": "CODIMD", - "ip": "10.152.5.6", - "osVersion": "Ubuntu-latest", - "postgres": { - "dockerVersion": "16-alpine", - "passwordSecretName": "sre-t2guac-other-codimd-password-postgresdb" - }, - "vmName": "CODIMD-SRE-T2GUAC", - "vmSize": "Standard_D2s_v3" - }, - "gitlab": { - "adminPasswordSecretName": "sre-t2guac-vm-admin-password-gitlab", - "disks": { - "data": { - "sizeGb": "512", - "type": "Standard_LRS" - }, - "os": 
{ - "sizeGb": "32", - "type": "Standard_LRS" - } - }, - "fqdn": "gitlab.t2guac.green.develop.turingsafehaven.ac.uk", - "hostname": "GITLAB", - "ip": "10.152.5.5", - "osVersion": "Ubuntu-latest", - "rootPasswordSecretName": "sre-t2guac-other-gitlab-root-password", - "vmName": "GITLAB-SRE-T2GUAC", - "vmSize": "Standard_D2s_v3" - }, - "rg": "RG_SHM_GREEN_SRE_T2GUAC_WEBAPPS" - } - } -} diff --git a/tests/serialisers/test_azure_serialisable_model.py b/tests/serialisers/test_azure_serialisable_model.py new file mode 100644 index 0000000000..093d4f70d9 --- /dev/null +++ b/tests/serialisers/test_azure_serialisable_model.py @@ -0,0 +1,139 @@ +from pytest import fixture, raises + +from data_safe_haven.exceptions import ( + DataSafeHavenConfigError, + DataSafeHavenTypeError, +) +from data_safe_haven.external import AzureSdk +from data_safe_haven.serialisers import AzureSerialisableModel + + +class ExampleAzureSerialisableModel(AzureSerialisableModel): + config_type = "Example" + default_filename = "file.yaml" + string: str + integer: int + list_of_integers: list[int] + + +@fixture +def example_config_class(): + return ExampleAzureSerialisableModel( + string="hello", integer=5, list_of_integers=[1, 2, 3] + ) + + +@fixture +def example_config_yaml(): + return "\n".join(["string: 'hello'", "integer: 5", "list_of_integers: [1,2,3]"]) + + +class TestAzureSerialisableModel: + def test_constructor(self, example_config_class): + assert isinstance(example_config_class, ExampleAzureSerialisableModel) + assert isinstance(example_config_class, AzureSerialisableModel) + assert example_config_class.string == "hello" + + def test_remote_yaml_diff(self, mocker, example_config_class, context): + mocker.patch.object( + AzureSdk, "download_blob", return_value=example_config_class.to_yaml() + ) + diff = example_config_class.remote_yaml_diff(context) + assert not diff + assert diff == [] + + def test_remote_yaml_diff_difference(self, mocker, example_config_class, context): + mocker.patch.object( + AzureSdk, "download_blob", return_value=example_config_class.to_yaml() + ) + example_config_class.integer = 0 + example_config_class.string = "abc" + + diff = example_config_class.remote_yaml_diff(context) + + assert isinstance(diff, list) + assert diff == [ + "--- remote\n", + "+++ local\n", + "@@ -1,6 +1,6 @@\n", + "-integer: 5\n", + "+integer: 0\n", + " list_of_integers:\n", + " - 1\n", + " - 2\n", + " - 3\n", + "-string: hello\n", + "+string: abc\n", + ] + + def test_to_yaml(self, example_config_class): + yaml = example_config_class.to_yaml() + assert isinstance(yaml, str) + assert "string: hello" in yaml + assert "integer: 5" in yaml + assert "config_type" not in yaml + + def test_upload(self, mocker, example_config_class, context): + mock_method = mocker.patch.object(AzureSdk, "upload_blob", return_value=None) + example_config_class.upload(context) + + mock_method.assert_called_once_with( + example_config_class.to_yaml(), + "file.yaml", + context.resource_group_name, + context.storage_account_name, + context.storage_container_name, + ) + + def test_from_yaml(self, example_config_yaml): + example_config_class = ExampleAzureSerialisableModel.from_yaml( + example_config_yaml + ) + assert isinstance(example_config_class, ExampleAzureSerialisableModel) + assert isinstance(example_config_class, AzureSerialisableModel) + assert example_config_class.string == "hello" + assert example_config_class.integer == 5 + assert example_config_class.list_of_integers == [1, 2, 3] + + def test_from_yaml_invalid_yaml(self): + yaml = 
"\n".join(["string: 'abc'", "integer: -3", "list_of_integers: [-1,0,1"]) + with raises( + DataSafeHavenConfigError, + match="Could not parse Example configuration as YAML.", + ): + ExampleAzureSerialisableModel.from_yaml(yaml) + + def test_from_yaml_not_dict(self): + yaml = """42""" + with raises( + DataSafeHavenConfigError, + match="Unable to parse Example configuration as a dict.", + ): + ExampleAzureSerialisableModel.from_yaml(yaml) + + def test_from_yaml_validation_error(self): + yaml = "\n".join( + ["string: 'abc'", "integer: 'not an integer'", "list_of_integers: [-1,0,1]"] + ) + + with raises( + DataSafeHavenTypeError, + match="Could not load Example configuration.", + ): + ExampleAzureSerialisableModel.from_yaml(yaml) + + def test_from_remote(self, mocker, context, example_config_yaml): + mock_method = mocker.patch.object( + AzureSdk, "download_blob", return_value=example_config_yaml + ) + example_config = ExampleAzureSerialisableModel.from_remote(context) + + assert isinstance(example_config, ExampleAzureSerialisableModel) + assert example_config.string == "hello" + + mock_method.assert_called_once_with( + "file.yaml", + context.resource_group_name, + context.storage_account_name, + context.storage_container_name, + ) diff --git a/tests/serialisers/test_yaml_serialisable_model.py b/tests/serialisers/test_yaml_serialisable_model.py new file mode 100644 index 0000000000..cd27ea612b --- /dev/null +++ b/tests/serialisers/test_yaml_serialisable_model.py @@ -0,0 +1,123 @@ +from pytest import fixture, raises + +from data_safe_haven.exceptions import ( + DataSafeHavenConfigError, + DataSafeHavenTypeError, +) +from data_safe_haven.serialisers import YAMLSerialisableModel + + +class ExampleYAMLSerialisableModel(YAMLSerialisableModel): + config_type = "Example" + string: str + integer: int + list_of_integers: list[int] + + +@fixture +def example_config_class(): + return ExampleYAMLSerialisableModel( + string="hello", integer=5, list_of_integers=[1, 2, 3] + ) + + +@fixture +def example_config_yaml(): + return "\n".join(["string: 'hello'", "integer: 5", "list_of_integers: [1,2,3]"]) + + +class TestYAMLSerialisableModel: + def test_constructor(self, example_config_class): + assert isinstance(example_config_class, ExampleYAMLSerialisableModel) + assert isinstance(example_config_class, YAMLSerialisableModel) + assert example_config_class.string == "hello" + + def test_from_filepath(self, tmp_path, example_config_yaml): + filepath = tmp_path / "test.yaml" + filepath.write_text(example_config_yaml) + example_config_class = ExampleYAMLSerialisableModel.from_filepath(filepath) + assert isinstance(example_config_class, ExampleYAMLSerialisableModel) + assert isinstance(example_config_class, YAMLSerialisableModel) + assert example_config_class.string == "hello" + assert example_config_class.integer == 5 + assert example_config_class.list_of_integers == [1, 2, 3] + + def test_from_yaml(self, example_config_yaml): + example_config_class = ExampleYAMLSerialisableModel.from_yaml( + example_config_yaml + ) + assert isinstance(example_config_class, ExampleYAMLSerialisableModel) + assert isinstance(example_config_class, YAMLSerialisableModel) + assert example_config_class.string == "hello" + assert example_config_class.integer == 5 + assert example_config_class.list_of_integers == [1, 2, 3] + + def test_from_yaml_invalid_yaml(self): + yaml = "\n".join(["string: 'abc'", "integer: -3", "list_of_integers: [-1,0,1"]) + with raises( + DataSafeHavenConfigError, + match="Could not parse Example configuration as 
YAML.", + ): + ExampleYAMLSerialisableModel.from_yaml(yaml) + + def test_from_yaml_not_dict(self): + yaml = """42""" + with raises( + DataSafeHavenConfigError, + match="Unable to parse Example configuration as a dict.", + ): + ExampleYAMLSerialisableModel.from_yaml(yaml) + + def test_from_yaml_validation_error(self): + yaml = "\n".join( + ["string: 'abc'", "integer: 'not an integer'", "list_of_integers: [-1,0,1]"] + ) + with raises( + DataSafeHavenTypeError, + match="Could not load Example configuration.", + ): + ExampleYAMLSerialisableModel.from_yaml(yaml) + + def test_to_filepath(self, tmp_path, example_config_class): + filepath = tmp_path / "test.yaml" + example_config_class.to_filepath(filepath) + contents = filepath.read_text().split("\n") + assert "string: hello" in contents + assert "integer: 5" in contents + assert "list_of_integers:" in contents + assert "- 1" in contents + assert "- 2" in contents + assert "- 3" in contents + + def test_to_yaml(self, example_config_class): + yaml = example_config_class.to_yaml() + assert isinstance(yaml, str) + assert "string: hello" in yaml + assert "integer: 5" in yaml + assert "config_type" not in yaml + + def test_yaml_diff(self, example_config_class): + other = example_config_class.model_copy(deep=True) + diff = example_config_class.yaml_diff(other) + assert not diff + assert diff == [] + + def test_yaml_diff_difference(self, example_config_class): + other = example_config_class.model_copy(deep=True) + other.integer = 3 + other.string = "abc" + diff = example_config_class.yaml_diff(other) + assert isinstance(diff, list) + assert diff == [ + "--- other\n", + "+++ self\n", + "@@ -1,6 +1,6 @@\n", + "-integer: 3\n", + "+integer: 5\n", + " list_of_integers:\n", + " - 1\n", + " - 2\n", + " - 3\n", + "-string: abc\n", + "+string: hello\n", + ] diff --git a/tests/srd_smoke_tests/README.md b/tests/srd_smoke_tests/README.md deleted file mode 100644 index 62451bfb28..0000000000 --- a/tests/srd_smoke_tests/README.md +++ /dev/null @@ -1,289 +0,0 @@ -# Tests - -## Prerequisites - -The package installation tests require the following layout: - -```none -├── README.md -├── run_all_tests.bats -├── package_lists -│ ├── packages-julia.list -│ ├── packages-python-3.10.2.list -│ ├── packages-python-3.8.12.list -│ ├── packages-python-3.9.10.list -│ ├── packages-python-system.txt -│ ├── packages-r-bioconductor.list -│ └── packages-r-cran.list -└── tests - ├── test_databases_python.py - ├── test_databases_R.R - ├── test_databases.sh - ├── test_functionality_julia.jl - ├── test_functionality_python.py - ├── test_functionality_R.R - ├── test_mounted_drives.sh - ├── test_packages_installed_julia.jl - ├── test_packages_installed_python.py - ├── test_packages_installed_R.R - ├── test_repository_python.sh - ├── test_repository_R.sh - └── test_virtual_environments_python.sh -``` - -## Test everything - -You can run all the non-interactive tests using - -```bash -bats run_all_tests.bats -``` - -Alternatively, you can run individual tests as described below. - -In order to test Jupyter run the following for each Python version you want to test: - -```bash -pyenv shell -jupyter notebook -``` - -and run `test_jupyter.ipynb` ensuring that the detected Python version matches throughout. 
- -## Testing mounted drives - -In order to test that all remote drives are correctly mounted you can run the following for each drive you want to test: - -```bash -> bash tests/test_mounted_drives.sh -d -``` - -The expected output for a successful test is: - -```none -All tests passed for '' -``` - -## Julia - -The installed Julia version can be seen by running `julia --version`. - -### Testing whether all packages are installed - -Run the tests with: - -```bash -> julia tests/test_packages_installed_julia.jl -``` - -The installation check will take several minutes to run. -The expected output for a successful test is: - -```none -Testing Julia packages -[several messages of the form: Testing '' ...] -All packages are installed -``` - -### Minimal functionality testing - -Run the minimal functionality tests with: - -```bash -> julia tests/test_functionality_julia.jl -``` - -The expected output for a successful test is: - -```none -All functionality tests passed -``` - -## Python - -The list of available Python versions can be seen by typing `pyenv versions` -For each of the Python versions that you want to test (eg. 3.8.x, 3.9.x, 3.10.x), activate the appropriate version with `pyenv shell `. - -### Testing whether all packages are installed - -Run the tests with: - -```bash -> pyenv shell -> python tests/test_packages_installed_python.py -``` - -The installation check will take several minutes to run. -The expected output for a successful test is: - -```none -Python version found -Testing Python packages -[several messages of the form: Testing '' ...] -Tensorflow can see the following devices: [] -All packages are installed -``` - -The message `CUDA_ERROR_NO_DEVICE: no CUDA capable device is detected` is **not** expected if you are using a GPU-enabled VM e.g. NC series - -### Minimal functionality testing - -Run the minimal functionality tests with: - -```bash -> python tests/test_functionality_python.py -``` - -The expected output for a successful test is: - -```none -Logistic model ran OK -All functionality tests passed -``` - -### Testing package mirrors - -To test the PyPI mirror run: - -```bash -> bash tests/test_repository_python.sh -``` - -This will attempt to install a few packages from the internal PyPI mirror. -The expected output for a successful test is: - -```none -Successfully installed pip-21.3.1 -Attempting to install absl-py... -... absl-py installation succeeded -Attempting to install zope.interface... -... 
zope.interface installation succeeded -All packages installed successfully -``` - -### Testing databases - -To test database connectivity you will need to know the connection details and can then run something like: - -```none -> python tests/test_databases_python.py --db-type mssql --db-name master --port 1433 --server-name MSSQL-T3GUAC.testc.dsgroupdev.co.uk -``` - -This will attempt to connect to the relevant database server -The expected output for a successful test is: - -```none -Attempting to connect to 'master' on 'MSSQL-T3GUAC.testc.dsgroupdev.co.uk' via port 1433 - TABLE_CATALOG TABLE_SCHEMA TABLE_NAME TABLE_TYPE -0 master dbo spt_fallback_db BASE TABLE -1 master dbo spt_fallback_dev BASE TABLE -2 master dbo spt_fallback_usg BASE TABLE -3 master dbo spt_values VIEW -4 master dbo spt_monitor BASE TABLE -All database tests passed -``` - -### Testing Python virtual environments - -To test the creation and management of virtual environments run the following for each Python version you want to test: - -```bash -> bash -i tests/test_virtual_environments_python.sh -``` - -This will attempt to create, use and destroy a `pyenv` virtual environment. -The expected output for a successful test is: - -```none -Preparing to test Python 3.8.12 with virtual environment 3.8.12-test -[✔] Testing that pyenv exists -[✔] Testing pyenv versions -[✔] Testing pyenv virtualenvs -[✔] Testing virtualenv creation -[✔] Testing virtualenv activation -[✔] Testing Python version -[✔] Testing virtualenv packages -[✔] Testing virtualenv package installation -[✔] Testing virtualenv deletion -All tests passed for Python 3.8.12 -``` - -## R - -The installed R version can be seen by running `R --version`. - -### Testing whether all packages are installed - -Run the tests with: - -```bash -> Rscript tests/test_packages_installed_R.R -``` - -The installation check will take several minutes to run. -The expected output for a successful test is: - -```none -[1] "Testing CRAN packages" -[several messages of the form: [1] "Testing '' ..."] -[1] "Testing Bioconductor packages" -[several messages of the form: [1] "Testing '' ..."] -[1] "All packages are installed" -``` - -### Minimal functionality testing - -Run the minimal functionality tests with: - -```bash -> Rscript tests/test_functionality_R.R -``` - -The expected output for a successful test is: - -```none -[1] "Logistic regression ran OK" -[1] "Clustering ran OK" -[1] "All functionality tests passed" -``` - -### Testing package mirrors - -To test the CRAN mirror run: - -```bash -> bash tests/test_repository_R.sh -``` - -This will attempt to install a few test packages from the internal CRAN mirror. -The expected output for a successful test is: - -```none -Attempting to install argon2... -... argon2 installation succeeded -Attempting to install zeallot... -... 
zeallot installation succeeded -All packages installed successfully -``` - -### Testing databases - -To test database connectivity you will need to know the connection details and can then run something like: - -```none -> Rscript tests/test_databases_R.R mssql master 1433 MSSQL-T3GUAC.testc.dsgroupdev.co.uk -``` - -This will attempt to connect to the relevant database server -The expected output for a successful test is: - -```none -[1] "Attempting to connect to 'master' on 'MSSQL-T3GUAC.testc.dsgroupdev.co.uk' via port '1433" - TABLE_CATALOG TABLE_SCHEMA TABLE_NAME TABLE_TYPE -1 master dbo spt_fallback_db BASE TABLE -2 master dbo spt_fallback_dev BASE TABLE -3 master dbo spt_fallback_usg BASE TABLE -4 master dbo spt_values VIEW -5 master dbo spt_monitor BASE TABLE -[1] "All database tests passed" -``` diff --git a/tests/srd_smoke_tests/run_all_tests.mustache.bats b/tests/srd_smoke_tests/run_all_tests.mustache.bats deleted file mode 100644 index c3237fdd1d..0000000000 --- a/tests/srd_smoke_tests/run_all_tests.mustache.bats +++ /dev/null @@ -1,178 +0,0 @@ -#! /usr/bin/env bats -load "../bats/bats-assert/load" -load "../bats/bats-file/load" -load "../bats/bats-support/load" - - -# Helper functions -# ---------------- -get_full_python_version() { - pyenv versions --bare --skip-aliases | grep -v "/" | grep "${1}." | tail -n 1 | xargs -} - -setup_python() { - eval "$(pyenv init - --no-rehash)" - pyenv shell "$(get_full_python_version $1)" - run python --version - assert_output --partial "${1}." -} - -test_python_functionality() { - setup_python "$1" - run python tests/test_functionality_python.py 2>&1 - assert_output --partial 'All functionality tests passed' - pyenv shell --unset -} - -test_python_packages() { - setup_python "$1" - run python tests/test_packages_installed_python.py 2> /dev/null - assert_output --regexp 'All [0-9]+ packages are installed' - pyenv shell --unset -} - -test_python_repository() { - setup_python "$1" - run bash tests/test_repository_python.sh 2>&1 - assert_output --partial 'All package installations behaved as expected' - pyenv shell --unset -} - -test_python_virtual_environments() { - PYTHON_VERSION="$(get_full_python_version $1)" - # This script must run in interactive mode to ensure that pyenv setup commands are run - run bash -i tests/test_virtual_environments_python.sh $PYTHON_VERSION 2>&1 - assert_output --partial "All tests passed for Python $PYTHON_VERSION" -} - -# Julia -# ----- -# Test Julia packages -@test "Julia packages" { - run julia tests/test_packages_installed_julia.jl 2>&1 - assert_output --regexp 'All [0-9]+ packages are installed' -} - -# Test Julia functionality -@test "Julia functionality" { - run julia tests/test_functionality_julia.jl 2>&1 - assert_output --partial 'All functionality tests passed' -} - - -# Python -# ------ -# Test Python {{SmokeTests.Python_v0}} -@test "Python packages ({{SmokeTests.Python_v0}})" { - test_python_packages '{{SmokeTests.Python_v0}}' -} -@test "Python functionality ({{SmokeTests.Python_v0}})" { - test_python_functionality '{{SmokeTests.Python_v0}}' -} -@test "Python virtual environments ({{SmokeTests.Python_v0}})" { - test_python_virtual_environments '{{SmokeTests.Python_v0}}' -} -@test "Python package repository ({{SmokeTests.Python_v0}})" { - test_python_repository '{{SmokeTests.Python_v0}}' -} - -# Test Python {{SmokeTests.Python_v1}} -@test "Python packages ({{SmokeTests.Python_v1}})" { - test_python_packages '{{SmokeTests.Python_v1}}' -} -# Test Python functionality -@test "Python functionality 
({{SmokeTests.Python_v1}})" { - test_python_functionality '{{SmokeTests.Python_v1}}' -} -@test "Python virtual environments ({{SmokeTests.Python_v1}})" { - test_python_virtual_environments '{{SmokeTests.Python_v1}}' -} -@test "Python package repository ({{SmokeTests.Python_v1}})" { - test_python_repository '{{SmokeTests.Python_v1}}' -} - -# Test Python {{SmokeTests.Python_v2}} -@test "Python packages ({{SmokeTests.Python_v2}})" { - test_python_packages '{{SmokeTests.Python_v2}}' -} -@test "Python functionality ({{SmokeTests.Python_v2}})" { - test_python_functionality '{{SmokeTests.Python_v2}}' -} -@test "Python virtual environments ({{SmokeTests.Python_v2}})" { - test_python_virtual_environments '{{SmokeTests.Python_v2}}' -} -@test "Python package repository ({{SmokeTests.Python_v2}})" { - test_python_repository '{{SmokeTests.Python_v2}}' -} - - -# R -# - -# Test R packages -@test "R packages" { - run Rscript tests/test_packages_installed_R.R 2>&1 - assert_output --regexp 'All [0-9]+ packages are installed' -} - -# Test R functionality -@test "R functionality" { - run Rscript tests/test_functionality_R.R - assert_output --partial 'All functionality tests passed' -} - -# Test R package repository -@test "R package repository" { - run bash tests/test_repository_R.sh - assert_output --partial 'All package installations behaved as expected' -} - - -# Databases -# --------- -{{#SmokeTests.MSSQLExists}} -# Test MS SQL database -@test "MS SQL database (Python)" { - run bash tests/test_databases.sh -d mssql -l python - assert_output --partial 'All database tests passed' -} -@test "MS SQL database (R)" { - run bash tests/test_databases.sh -d mssql -l R - assert_output --partial 'All database tests passed' -} -{{/SmokeTests.MSSQLExists}} - -{{#SmokeTests.PostgreSQLExists}} -# Test Postgres database -@test "Postgres database (Python)" { - run bash tests/test_databases.sh -d postgres -l python - assert_output --partial 'All database tests passed' -} -@test "Postgres database (R)" { - run bash tests/test_databases.sh -d postgres -l R - assert_output --partial 'All database tests passed' -} -{{/SmokeTests.PostgreSQLExists}} - - -# Mounted drives -# -------------- -@test "Mounted drives (/data)" { - run bash tests/test_mounted_drives.sh -d data - assert_output --partial 'All tests passed' -} -@test "Mounted drives (/home)" { - run bash tests/test_mounted_drives.sh -d home - assert_output --partial 'All tests passed' -} -@test "Mounted drives (/output)" { - run bash tests/test_mounted_drives.sh -d output - assert_output --partial 'All tests passed' -} -@test "Mounted drives (/shared)" { - run bash tests/test_mounted_drives.sh -d shared - assert_output --partial 'All tests passed' -} -@test "Mounted drives (/scratch)" { - run bash tests/test_mounted_drives.sh -d scratch - assert_output --partial 'All tests passed' -} diff --git a/tests/srd_smoke_tests/test_databases.mustache.sh b/tests/srd_smoke_tests/test_databases.mustache.sh deleted file mode 100644 index 07805ae69c..0000000000 --- a/tests/srd_smoke_tests/test_databases.mustache.sh +++ /dev/null @@ -1,34 +0,0 @@ -#! 
/bin/bash -db_type="" -language="" -while getopts d:l: flag; do - case "${flag}" in - d) db_type=${OPTARG};; - l) language=${OPTARG};; - *) echo "Invalid option ${OPTARG}"; exit 1;; - esac -done - -if [ "$db_type" == "mssql" ]; then - db_name="master" - port="{{SmokeTests.MSSQLPort}}" - server_name="{{SmokeTests.MSSQLVMName}}.{{shm.domain.fqdn}}" -elif [ "$db_type" == "postgres" ]; then - db_name="postgres" - port="{{SmokeTests.PostgreSQLPort}}" - server_name="{{SmokeTests.PostgreSQLVMName}}.{{shm.domain.fqdn}}" -else - echo "Did not recognise database type '$db_type'" -fi - -if [ $port == "" ]; then - echo "Database type '$db_type' is not part of this SRE" - echo "All database tests passed" -else - script_path=$(dirname "$(readlink -f "$0")") - if [ "$language" == "python" ]; then - python "${script_path}"/test_databases_python.py --db-type "$db_type" --db-name "$db_name" --port "$port" --server-name "$server_name" - elif [ "$language" == "R" ]; then - Rscript "${script_path}"/test_databases_R.R "$db_type" "$db_name" "$port" "$server_name" - fi -fi diff --git a/tests/srd_smoke_tests/test_databases_python.py b/tests/srd_smoke_tests/test_databases_python.py deleted file mode 100644 index 2e6614ecef..0000000000 --- a/tests/srd_smoke_tests/test_databases_python.py +++ /dev/null @@ -1,37 +0,0 @@ -#! /usr/bin/env python -import argparse -import psycopg2 -import pyodbc -import pandas as pd - - -def test_database(server_name, port, db_type, db_name): - print(f"Attempting to connect to '{db_name}' on '{server_name}' via port {port}") - if db_type == "mssql": - cnxn = pyodbc.connect( - f"DRIVER={{ODBC Driver 17 for SQL Server}};SERVER={server_name},{port};DATABASE={db_name};Trusted_Connection=yes;" - ) - elif db_type == "postgres": - cnxn = psycopg2.connect(host=server_name, port=port, database=db_name) - df = pd.read_sql("SELECT * FROM information_schema.tables;", cnxn) - if df.size: - print(df.head(5)) - print("All database tests passed") - - -# Parse command line arguments -parser = argparse.ArgumentParser() -parser.add_argument( - "-d", - "--db-type", - type=str, - choices=["mssql", "postgres"], - help="Which database type to use", -) -parser.add_argument("-n", "--db-name", type=str, help="Which database to connect to") -parser.add_argument("-p", "--port", type=str, help="Which port to connect to") -parser.add_argument("-s", "--server-name", type=str, help="Which server to connect to") -args = parser.parse_args() - -# Run database test -test_database(args.server_name, args.port, args.db_type, args.db_name) diff --git a/tests/srd_smoke_tests/test_functionality_julia.jl b/tests/srd_smoke_tests/test_functionality_julia.jl deleted file mode 100644 index d8080f9f58..0000000000 --- a/tests/srd_smoke_tests/test_functionality_julia.jl +++ /dev/null @@ -1,29 +0,0 @@ -using LinearAlgebra, StatsFuns, Optim, Distributions, Formatting - -# test random numbers, Distributions -X = randn(1000, 10) -w = randn(10) -y = rand.(Bernoulli.(logistic.(X * w))) * 1.0 - -# test linear algebra -svd(X) - -# test optimization -epsilon = eps() -function logistic_regression_loss(w) - p = logistic.(X * w) - return -mean(y .* log.(p .+ epsilon) + (1 .- y) .* log.(1 .- p .+ epsilon)) -end - -true_loss = logistic_regression_loss(w) -optim_soln = optimize(logistic_regression_loss, zeros(10), BFGS()) - -# test formatting -delta = format("{:.4f}", true_loss - optim_soln.minimum) - -# (warning if optimizer performs poorly) -if true_loss < optim_soln.minimum - @warn "Optim found suboptimal solution by " * delta -end - -println("All 
functionality tests passed") diff --git a/tests/srd_smoke_tests/test_jupyter.ipynb b/tests/srd_smoke_tests/test_jupyter.ipynb deleted file mode 100644 index 59184400c2..0000000000 --- a/tests/srd_smoke_tests/test_jupyter.ipynb +++ /dev/null @@ -1,63 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import sys\n", - "print(sys.version)\n", - "print(sys.executable)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!which python" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!python --version" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!which pip" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!pip --version" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "sys.path" - ] - } - ], - "metadata": {}, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/tests/srd_smoke_tests/test_mounted_drives.sh b/tests/srd_smoke_tests/test_mounted_drives.sh deleted file mode 100644 index 5862f2852c..0000000000 --- a/tests/srd_smoke_tests/test_mounted_drives.sh +++ /dev/null @@ -1,67 +0,0 @@ -#! /bin/bash -while getopts d: flag -do - case "${flag}" in - d) directory=${OPTARG};; - esac -done - -nfailed=0 -if [[ "$directory" = "home" ]]; then directory_path=$(echo ~); else directory_path="/${directory}"; fi -testfile="$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1)" - -# Check that directory exists -if [ "$(ls ${directory_path} 2>&1 1>/dev/null)" ]; then - echo "Could not find mount '${directory_path}'" - nfailed=$((nfailed + 1)) -fi - -# Test operations -CAN_CREATE="$([[ "$(touch ${directory_path}/${testfile} 2>&1 1>/dev/null)" = "" ]] && echo '1' || echo '0')" -CAN_WRITE="$([[ -w ${directory_path}/${testfile} ]] && echo '1' || echo '0')" -CAN_DELETE="$([[ "$(touch ${directory_path}/${testfile} 2>&1 1>/dev/null | rm ${directory_path}/${testfile} 2>&1)" ]] && echo '0' || echo '1')" - -# Check that permissions are as expected for each directory -case "$directory" in - data) - if [ $CAN_CREATE = 1 ]; then echo "Able to create files in ${directory_path}!"; nfailed=$((nfailed + 1)); fi - if [ $CAN_WRITE = 1 ]; then echo "Able to write files in ${directory_path}!"; nfailed=$((nfailed + 1)); fi - if [ $CAN_DELETE = 1 ]; then echo "Able to delete files in ${directory_path}!"; nfailed=$((nfailed + 1)); fi - ;; - - home) - if [ $CAN_CREATE = 0 ]; then echo "Unable to create files in ${directory_path}!"; nfailed=$((nfailed + 1)); fi - if [ $CAN_WRITE = 0 ]; then echo "Unable to write files in ${directory_path}!"; nfailed=$((nfailed + 1)); fi - if [ $CAN_DELETE = 0 ]; then echo "Unable to delete files in ${directory_path}!"; nfailed=$((nfailed + 1)); fi - ;; - - output) - if [ $CAN_CREATE = 0 ]; then echo "Unable to create files in ${directory_path}!"; nfailed=$((nfailed + 1)); fi - if [ $CAN_WRITE = 0 ]; then echo "Unable to write files in ${directory_path}!"; nfailed=$((nfailed + 1)); fi - if [ $CAN_DELETE = 0 ]; then echo "Unable to delete files in ${directory_path}!"; nfailed=$((nfailed + 1)); fi - ;; - - shared) - if [ $CAN_CREATE = 0 ]; then echo "Unable to create files in ${directory_path}!"; nfailed=$((nfailed + 1)); fi - if [ $CAN_WRITE = 0 ]; then echo "Unable to write files in 
${directory_path}!"; nfailed=$((nfailed + 1)); fi - if [ $CAN_DELETE = 0 ]; then echo "Unable to delete files in ${directory_path}!"; nfailed=$((nfailed + 1)); fi - ;; - - scratch) - if [ $CAN_CREATE = 0 ]; then echo "Unable to create files in ${directory_path}!"; nfailed=$((nfailed + 1)); fi - if [ $CAN_WRITE = 0 ]; then echo "Unable to write files in ${directory_path}!"; nfailed=$((nfailed + 1)); fi - if [ $CAN_DELETE = 0 ]; then echo "Unable to delete files in ${directory_path}!"; nfailed=$((nfailed + 1)); fi - ;; - - *) - echo "Usage: $0 -d [directory]" - exit 1 -esac - -# Cleanup and print output -rm -rf "${directory_path}/${testfile}" 2> /dev/null -if [ $nfailed = 0 ]; then - echo "All tests passed for '${directory_path}'" -else - echo "$nfailed tests failed for '${directory_path}'!" -fi diff --git a/tests/srd_smoke_tests/test_packages_installed_R.R b/tests/srd_smoke_tests/test_packages_installed_R.R deleted file mode 100644 index bd8aa46edd..0000000000 --- a/tests/srd_smoke_tests/test_packages_installed_R.R +++ /dev/null @@ -1,69 +0,0 @@ -library(stringr) - -# Initialise lists -warning_list <- c() -error_list <- c() - -# The following packages give errors that are false positives -# BiocManager: False positive - warning about not being able to connect to the internet -# clusterProfiler: Error is "multiple methods tables found for 'toTable'". Not yet understood -# flowUtils: False positive - warning about string translations -# GOSemSim: False positive - no warning on package load -# graphite: False positive - no warning on package load -# rgl: Error is because the X11 server could not be loaded -# tmap: False positive - no warning on package load -false_positive_list <- c("BiocManager", "clusterProfiler", "flowUtils", "GOSemSim", "graphite", "rgl", "tmap") - -# Test package with non-standard evaluation and append to the proper list -test_package <- function(p) { - print(paste("Testing '", p, "' ...", sep="")) - tryCatch( - eval(parse(text = paste0("library(", p, ")"))), - error = function(m) { - assign("error_list", c(error_list, p), envir = .GlobalEnv) - }, - warning = function(m) { - assign("warning_list", c(warning_list, p), envir = .GlobalEnv) - }, - message = function(m) { - # do nothing, as many packages print messages upon loading - } - ) -} - -scriptDirectory <- function() { - cmdArgs <- commandArgs(trailingOnly = FALSE) - match <- grep("--file=", cmdArgs) - return (dirname(normalizePath(sub("--file=", "", cmdArgs[match])))) -} - -# Read in the package list from the repo -repos <- c("CRAN", "Bioconductor") -n_packages = 0 -for (repo in repos) { - packageList = file.path(scriptDirectory(), "..", "package_lists", paste("packages-r-", tolower(repo), ".list", sep = "")) - packages <- readLines(packageList) - print(paste("Testing", length(packages), repo, "packages")) - for (package in packages) { - if (!(package %in% false_positive_list)) { - test_package(package) - } - } - n_packages = n_packages + length(packages) -} - -# Show results -if (0 == length(warning_list) & 0 == length(error_list)) { - print(paste("All", n_packages, "packages are installed")) -} else { - # List any warnings - if (0 < length(warning_list)) { - print(paste("The following", length(warning_list), "packages gave a warning:")) - cat(warning_list, sep = "\n") - } - # List any errors - if (0 < length(error_list)) { - print(paste("The following", length(error_list), "packages gave a error:")) - cat(error_list, sep = "\n") - } -} diff --git a/tests/srd_smoke_tests/test_packages_installed_julia.jl 
b/tests/srd_smoke_tests/test_packages_installed_julia.jl deleted file mode 100644 index 496e838a8d..0000000000 --- a/tests/srd_smoke_tests/test_packages_installed_julia.jl +++ /dev/null @@ -1,38 +0,0 @@ -using Pkg - -# Get the list of packages to check -missing_packages = String[] -packages = readlines(joinpath(@__DIR__, "..", "package_lists", "packages-julia.list")) -println("Testing ", size(packages, 1), " Julia packages") - -# Redirect stdout to suppress package building messages -original_stdout = stdout -(rd, wr) = redirect_stdout(); - -# Check for packages in two ways -for packageName in readlines(joinpath(@__DIR__, "..", "package_lists", "packages-julia.list")) - println("Testing '", packageName, "' ...") - # Check that the package exists - try - Pkg.status(packageName) - catch - push!(missing_packages, packageName) - end - # Check that the package is usable (NB. this can be slow) - try - package = Symbol(packageName) - @eval using $package - catch - push!(missing_packages, packageName) - end -end - -# Return to original stdout -redirect_stdout(original_stdout) - -# Print a summary of the package tests -if isempty(missing_packages) - println("All ", size(packages)[1], " packages are installed") -else - println("Packages not installed: ", Set(missing_packages)) -end diff --git a/tests/srd_smoke_tests/test_packages_installed_python.py b/tests/srd_smoke_tests/test_packages_installed_python.py deleted file mode 100644 index d91d3238f7..0000000000 --- a/tests/srd_smoke_tests/test_packages_installed_python.py +++ /dev/null @@ -1,157 +0,0 @@ -import glob -import os -import shutil -import subprocess -import sys -import warnings - -import pkg_resources - -versions = { - pkg.split(" ")[0]: pkg.split(" ")[-1] # get package name and version - for pkg in subprocess.run( - ["pip", "list"], stdout=subprocess.PIPE - ) # ... from pip list - .stdout.decode() # ... stdout converted to string - .split("\n")[1:] # ... splitting into lines and discard the header -} - -# Some packages cannot be imported so we skip them. -KNOWN_RESOURCE_ISSUES = [ - "backports", # does not define a package - "xgboost", # has dependencies on an external library -] - -# For these packages we check for an executable as they are not importable -NON_IMPORTABLE_PACKAGES = { - "pip-tools": "pip-compile", - "repro-catalogue": "catalogue", -} - -# Some packages are imported using a different name than they `pip install` with -IMPORTABLE_NAMES = { - "PyYAML": "yaml", - "beautifulsoup4": "bs4", - "DataShape": "datashape", - "Fiona": "fiona", - "Flask": "flask", - "Jinja2": "jinja2", - "Markdown": "markdown", - "pandas-profiling": "pandas_profiling", - "Pillow": "PIL", - "protobuf": "google.protobuf", - "pyshp": "shapefile", - "pystan": ("stan" if int(versions.get("pystan", "0")[0]) >= 3 else "pystan"), - "python-dateutil": "dateutil", - "PyWavelets": "pywt", - "scikit-image": "skimage", - "scikit-learn": "sklearn", - "spacy-langdetect": "spacy_langdetect", - "Sphinx": "sphinx", - "SQLAlchemy": "sqlalchemy", - "tensorflow-estimator": "tensorflow.estimator", - "Theano": "theano", - "torchvision": "torchvision", - "XlsxWriter": "xlsxwriter", -} - -# Tensorflow is a special case -TENSORFLOW_PACKAGES = [ - "tensorflow", - "tensorflow-gpu", -] - - -def get_python_version(): - """ - Get the current Python version as a string. 
- """ - v_info = sys.version_info - return { - "full": "{}.{}.{}".format(v_info.major, v_info.minor, v_info.micro), - } - - -def import_tensorflow(): - try: - warnings.simplefilter("ignore") - os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" - module_ = __import__("tensorflow.python.client", fromlist=["device_list"]) - device_names = [d.name for d in module_.device_lib.list_local_devices()] - print("Tensorflow can see the following devices %s" % device_names) - return True - except ImportError: - return False - - -def get_missing_packages(packages): - """ - Check that all requested packages are importable and that resources exist - """ - warnings.filterwarnings("ignore", category=DeprecationWarning) - warning, missing = [], [] - for package in packages: - print(f"Testing '{package}' ...") - # Some packages are not importable so we test for the executable instead - if package in NON_IMPORTABLE_PACKAGES.keys(): - if not shutil.which(NON_IMPORTABLE_PACKAGES[package]): - missing.append(package) - continue - # Tensorflow is a special case - elif package in TENSORFLOW_PACKAGES: - if not import_tensorflow(): - missing.append(package) - continue - # Test whether we can import - else: - importable_name = ( - IMPORTABLE_NAMES[package] if package in IMPORTABLE_NAMES else package - ) - try: - _ = __import__(importable_name) - except ImportError: - missing.append(package) - continue - # If we can, then test whether package resources exist - if package not in KNOWN_RESOURCE_ISSUES: - try: - pkg_resources.get_distribution(package) - except pkg_resources.DistributionNotFound: - warning.append(package) - - return (warning, missing) - - -def test_packages(): - version = get_python_version() - print("Python version %s found" % version["full"]) - pypi_package_lists = glob.glob( - os.path.join( - os.path.dirname(os.path.abspath(__file__)), - "..", - "package_lists", - "packages-python-*", - ) - ) - matching_package_lists = [ - _list for _list in pypi_package_lists if version["full"] in _list - ] - if matching_package_lists: - with open(matching_package_lists[0], "r") as f_packages: - packages = [ - p.strip() for p in f_packages.readlines() if not p.startswith("#") - ] - print("Testing {} Python packages".format(len(packages))) - warning, missing = get_missing_packages(packages) - if warning: - print(f"The following {len(warning)} packages may be missing resources:") - print("\n".join(warning)) - if missing: - print(f"The following {len(missing)} packages are missing or broken:") - print("\n".join(missing)) - if (not warning) and (not missing): - print(f"All {len(packages)} packages are installed") - - -if __name__ == "__main__": - test_packages() diff --git a/tests/srd_smoke_tests/test_virtual_environments_python.sh b/tests/srd_smoke_tests/test_virtual_environments_python.sh deleted file mode 100755 index d71fd4178b..0000000000 --- a/tests/srd_smoke_tests/test_virtual_environments_python.sh +++ /dev/null @@ -1,73 +0,0 @@ -#! 
/bin/bash - -# Common variables -N_FAILED_TESTS=0 -PYTHON_VERSION=$1 -VENV_NAME="${PYTHON_VERSION}-test" -echo "Preparing to test Python $PYTHON_VERSION with virtual environment $VENV_NAME" - -# Test pyenv -body="Testing that pyenv exists" -echo -ne "[ ] ${body}\r" -which pyenv > /dev/null 2>&1 -# if test $?; then echo -e "[\xE2\x9C\x94] ${body}"; else echo -e "[x] ${body}"; N_FAILED_TESTS=$((N_FAILED_TESTS + 1)); fi -if test $?; then echo -e "[x] ${body}"; else echo -e "[\xE2\x9C\x94] ${body}"; N_FAILED_TESTS=$((N_FAILED_TESTS + 1)); fi - -# Test pyenv versions -body="Testing pyenv versions" -echo -ne "[ ] ${body}\r" -pyenv versions > /dev/null 2>&1 -if test $?; then echo -e "[\xE2\x9C\x94] ${body}"; else echo -e "[x] ${body}"; N_FAILED_TESTS=$((N_FAILED_TESTS + 1)); fi - -# Test pyenv virtualenvs -body="Testing pyenv virtualenvs" -echo -ne "[ ] ${body}\r" -pyenv virtualenvs > /dev/null 2>&1 -if test $?; then echo -e "[\xE2\x9C\x94] ${body}"; else echo -e "[x] ${body}"; N_FAILED_TESTS=$((N_FAILED_TESTS + 1)); fi - -# Test virtualenv creation -body="Testing virtualenv creation" -echo -ne "[ ] ${body}\r" -pyenv virtualenv-delete -f "$VENV_NAME" 2> /dev/null -pyenv virtualenv -f "$PYTHON_VERSION" "$VENV_NAME" > /dev/null 2>&1 -if test $?; then echo -e "[\xE2\x9C\x94] ${body}"; else echo -e "[x] ${body}"; N_FAILED_TESTS=$((N_FAILED_TESTS + 1)); fi - -# Test virtualenv activation -body="Testing virtualenv activation" -echo -ne "[ ] ${body}\r" -pyenv activate "$VENV_NAME" > /dev/null 2>&1 -if test $?; then echo -e "[\xE2\x9C\x94] ${body}"; else echo -e "[x] ${body}"; N_FAILED_TESTS=$((N_FAILED_TESTS + 1)); fi - -# Test Python version -body="Testing Python version" -echo -ne "[ ] ${body}\r" -test "$(python --version)" == "Python ${PYTHON_VERSION}" > /dev/null 2>&1 -if test $?; then echo -e "[\xE2\x9C\x94] ${body}"; else echo -e "[x] ${body}"; N_FAILED_TESTS=$((N_FAILED_TESTS + 1)); fi - -# Test virtualenv packages -body="Testing virtualenv packages" -echo -ne "[ ] ${body}\r" -INSTALLED_PACKAGES=$(pip list --format=freeze | cut -d'=' -f1) -test "$(echo "$INSTALLED_PACKAGES" | wc -w)" -eq 3 > /dev/null 2>&1 -if test $?; then echo -e "[\xE2\x9C\x94] ${body}"; else echo -e "[x] ${body}"; N_FAILED_TESTS=$((N_FAILED_TESTS + 1)); fi - -# Test virtualenv package installation -body="Testing virtualenv package installation" -echo -ne "[ ] ${body}\r" -pip install matplotlib --quiet -INSTALLED_PACKAGES=$(pip list --format=freeze | cut -d'=' -f1) -test "$(echo "$INSTALLED_PACKAGES" | tr ' ' '\n' | grep "matplotlib")" == "matplotlib" > /dev/null 2>&1 -if test $?; then echo -e "[\xE2\x9C\x94] ${body}"; else echo -e "[x] ${body}"; N_FAILED_TESTS=$((N_FAILED_TESTS + 1)); fi - -# Tear down a new virtual environment -body="Testing virtualenv deletion" -echo -ne "[ ] ${body}\r" -pyenv virtualenv-delete -f "$VENV_NAME" > /dev/null 2>&1 -if test $?; then echo -e "[\xE2\x9C\x94] ${body}"; else echo -e "[x] ${body}"; N_FAILED_TESTS=$((N_FAILED_TESTS + 1)); fi - -# Cleanup and print output -if [ $N_FAILED_TESTS = 0 ]; then - echo "All tests passed for Python ${PYTHON_VERSION}" -else - echo "$N_FAILED_TESTS tests failed for Python ${PYTHON_VERSION}!" 
-fi diff --git a/tests/validators/test_typer_validators.py b/tests/validators/test_typer_validators.py new file mode 100644 index 0000000000..f2dabc619b --- /dev/null +++ b/tests/validators/test_typer_validators.py @@ -0,0 +1,64 @@ +import pytest +from typer import BadParameter + +from data_safe_haven import validators + + +class TestTyperAadGuid: + @pytest.mark.parametrize( + "guid", + [ + "d5c5c439-1115-4cb6-ab50-b8e547b6c8dd", + "10de18e7-b238-6f1e-a4ad-772708929203", + ], + ) + def test_typer_aad_guid(self, guid): + assert validators.typer_aad_guid(guid) == guid + + @pytest.mark.parametrize( + "guid", + [ + "10de18e7_b238_6f1e_a4ad_772708929203", + "not a guid", + ], + ) + def test_typer_aad_guid_fail(self, guid): + with pytest.raises(BadParameter, match="Expected GUID"): + validators.typer_aad_guid(guid) + + def test_typer_aad_guid_none(self): + assert validators.typer_aad_guid(None) is None + + +class TestTyperAzureSubscriptionName: + @pytest.mark.parametrize( + "subscription_name", + [ + "MySubscription", + "Example-Subscription", + "Subscription5", + ], + ) + def test_typer_subscription_name(self, subscription_name): + assert ( + validators.typer_azure_subscription_name(subscription_name) + == subscription_name + ) + + @pytest.mark.parametrize( + "subscription_name", + [ + "My!Subscription", + "", + "%^*", + "1@ subscription", + "sübscríptìőn", + "🙂", + ], + ) + def test_subscription_name_fail(self, subscription_name): + with pytest.raises(BadParameter, match="can only contain alphanumeric"): + validators.typer_azure_subscription_name(subscription_name) + + def test_typer_aad_guid_none(self): + assert validators.typer_azure_subscription_name(None) is None diff --git a/tests/validators/test_validators.py b/tests/validators/test_validators.py new file mode 100644 index 0000000000..1c38e551f8 --- /dev/null +++ b/tests/validators/test_validators.py @@ -0,0 +1,154 @@ +import pytest + +from data_safe_haven import validators +from data_safe_haven.types import DatabaseSystem + + +class TestValidateAadGuid: + @pytest.mark.parametrize( + "guid", + [ + "d5c5c439-1115-4cb6-ab50-b8e547b6c8dd", + "10de18e7-b238-6f1e-a4ad-772708929203", + ], + ) + def test_aad_guid(self, guid): + assert validators.aad_guid(guid) == guid + + @pytest.mark.parametrize( + "guid", + [ + "10de18e7_b238_6f1e_a4ad_772708929203", + "not a guid", + ], + ) + def test_aad_guid_fail(self, guid): + with pytest.raises(ValueError, match="Expected GUID"): + validators.aad_guid(guid) + + +class TestAzureSubscriptionName: + @pytest.mark.parametrize( + "subscription_name", + [ + "My Subscription", + "Example-Subscription", + "Subscription5", + ], + ) + def test_subscription_name(self, subscription_name): + assert ( + validators.azure_subscription_name(subscription_name) == subscription_name + ) + + @pytest.mark.parametrize( + "subscription_name", + [ + "My!Subscription", + "", + "%^*", + "1@ subscription", + "sübscríptìőn", + "🙂", + ], + ) + def test_subscription_name_fail(self, subscription_name): + with pytest.raises(ValueError, match="can only contain alphanumeric"): + validators.azure_subscription_name(subscription_name) + + +class TestValidateFqdn: + @pytest.mark.parametrize( + "fqdn", + [ + "shm.acme.com", + "example.com", + "a.b.c.com.", + "a-b-c.com", + ], + ) + def test_fqdn(self, fqdn): + assert validators.fqdn(fqdn) == fqdn + + @pytest.mark.parametrize( + "fqdn", + [ + "invalid", + "%example.com", + "a b c.com", + "a_b_c.com", + ], + ) + def test_fqdn_fail(self, fqdn): + with pytest.raises( + ValueError, match="Expected 
valid fully qualified domain name" + ): + validators.fqdn(fqdn) + + +class TestValidateSafeString: + @pytest.mark.parametrize( + "safe_string", + [ + "valid_with_underscores-and-hyphens", + "mIxeDCAseiNpuT", + "0123456789", + ], + ) + def test_safe_string(self, safe_string): + assert validators.safe_string(safe_string) == safe_string + + @pytest.mark.parametrize( + "safe_string", + [ + "has a space", + "has!special@characters", + "has\tnon\rprinting\ncharacters", + "", + "🙂", + ], + ) + def test_safe_string_fail(self, safe_string): + with pytest.raises( + ValueError, + match="Expected valid string containing only letters, numbers, hyphens and underscores", + ): + validators.safe_string(safe_string) + + +class MyClass: + def __init__(self, x): + self.x = x + + def __eq__(self, other): + return self.x == other.x + + def __hash__(self): + return hash(self.x) + + +class TestUniqueList: + @pytest.mark.parametrize( + "items", + [ + [1, 2, 3], + ["a", 5, len], + [MyClass(x=1), MyClass(x=2)], + ], + ) + def test_unique_list(self, items): + validators.unique_list(items) + + @pytest.mark.parametrize( + "items", + [ + [DatabaseSystem.POSTGRESQL, DatabaseSystem.POSTGRESQL], + [DatabaseSystem.POSTGRESQL, 2, DatabaseSystem.POSTGRESQL], + [1, 1], + ["abc", "abc"], + [MyClass(x=1), MyClass(x=1)], + ], + ) + def test_unique_list_fail(self, items): + with pytest.raises(ValueError, match="All items must be unique."): + validators.unique_list(items) diff --git a/typings/fqdn/__init__.pyi b/typings/fqdn/__init__.pyi new file mode 100644 index 0000000000..61cf4a40d7 --- /dev/null +++ b/typings/fqdn/__init__.pyi @@ -0,0 +1,7 @@ +from typing import Any + + +class FQDN: + def __init__(self, fqdn: Any, *nothing: list[Any], **kwags: dict[Any, Any]) -> None: ... + @property + def is_valid(self) -> bool: ... 
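The stub above only declares the two members of `fqdn.FQDN` that the project appears to rely on: the constructor and the `is_valid` property. As a hedged illustration (not the repository's actual implementation), a validator built on this interface and consistent with the behaviour exercised in `tests/validators/test_validators.py::TestValidateFqdn` could look roughly like this:

```python
# Minimal sketch only, assuming the `fqdn` package API declared in the stub above.
# `fqdn_validator` is a hypothetical name used for illustration.
from fqdn import FQDN


def fqdn_validator(value: str) -> str:
    """Return the input unchanged if it is a valid FQDN, otherwise raise ValueError."""
    if not FQDN(value).is_valid:
        msg = f"Expected valid fully qualified domain name, got '{value}'."
        raise ValueError(msg)
    return value


print(fqdn_validator("shm.acme.com"))  # prints "shm.acme.com"
# fqdn_validator("a_b_c.com")          # would raise ValueError (underscores not allowed)
```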
diff --git a/typings/pulumi/__init__.pyi b/typings/pulumi/__init__.pyi new file mode 100644 index 0000000000..e1468220dd --- /dev/null +++ b/typings/pulumi/__init__.pyi @@ -0,0 +1,31 @@ +import pulumi.automation as automation +import pulumi.dynamic as dynamic +from pulumi.asset import FileAsset +from pulumi.config import ( + Config, +) +from pulumi.output import ( + Input, + Output, + UNKNOWN, +) +from pulumi.resource import ( + ComponentResource, + Resource, + ResourceOptions, + export, +) + +__all__ = [ + "automation", + "ComponentResource", + "Config", + "dynamic", + "export", + "FileAsset", + "Input", + "Output", + "Resource", + "ResourceOptions", + "UNKNOWN", +] diff --git a/typings/pulumi_azure_native/__init__.pyi b/typings/pulumi_azure_native/__init__.pyi new file mode 100644 index 0000000000..56be0a1e3a --- /dev/null +++ b/typings/pulumi_azure_native/__init__.pyi @@ -0,0 +1,37 @@ +import pulumi_azure_native._utilities as _utilities +import pulumi_azure_native.automation as automation +import pulumi_azure_native.authorization as authorization +import pulumi_azure_native.compute as compute +import pulumi_azure_native.containerinstance as containerinstance +import pulumi_azure_native.dataprotection as dataprotection +import pulumi_azure_native.dbforpostgresql as dbforpostgresql +import pulumi_azure_native.insights as insights +import pulumi_azure_native.keyvault as keyvault +import pulumi_azure_native.maintenance as maintenance +import pulumi_azure_native.managedidentity as managedidentity +import pulumi_azure_native.network as network +import pulumi_azure_native.operationalinsights as operationalinsights +import pulumi_azure_native.operationsmanagement as operationsmanagement +import pulumi_azure_native.resources as resources +import pulumi_azure_native.sql as sql +import pulumi_azure_native.storage as storage + +__all__ = [ + "automation", + "authorization", + "compute", + "containerinstance", + "dataprotection", + "dbforpostgresql", + "insights", + "keyvault", + "maintenance", + "managedidentity", + "network", + "operationalinsights", + "operationsmanagement", + "resources", + "sql", + "storage", + "_utilities", +] diff --git a/typings/typer/__init__.pyi b/typings/typer/__init__.pyi new file mode 100644 index 0000000000..16e679947b --- /dev/null +++ b/typings/typer/__init__.pyi @@ -0,0 +1,15 @@ +from click.exceptions import BadParameter, Exit +from click.termui import confirm + +from .main import Typer +from .params import Argument +from .params import Option + +__all__ = [ + "Argument", + "BadParameter", + "Exit", + "Option", + "Typer", + "confirm", +] diff --git a/typings/typer/main.pyi b/typings/typer/main.pyi new file mode 100644 index 0000000000..ffddbfe45e --- /dev/null +++ b/typings/typer/main.pyi @@ -0,0 +1,68 @@ +from _typeshed import Incomplete +from typing import Any, Callable, Dict, Optional, Type, Union +from .models import CommandFunctionType, Default + +TyperCommand = Incomplete +TyperGroup = Incomplete + +class Typer: + def __init__(self, *args: Any, **kwargs: Any) -> None: ... + def __call__(self, *args: Any, **kwargs: Any) -> Any: ... 
+ def callback( + self, + name: Optional[str] = Default(None), + *, + cls: Optional[Type[TyperGroup]] = Default(None), + invoke_without_command: bool = Default(False), + no_args_is_help: bool = Default(False), + subcommand_metavar: Optional[str] = Default(None), + chain: bool = Default(False), + result_callback: Optional[Callable[..., Any]] = Default(None), + context_settings: Optional[Dict[Any, Any]] = Default(None), + help: Optional[str] = Default(None), + epilog: Optional[str] = Default(None), + short_help: Optional[str] = Default(None), + options_metavar: str = Default("[OPTIONS]"), + add_help_option: bool = Default(True), + hidden: bool = Default(False), + deprecated: bool = Default(False), + rich_help_panel: Union[str, None] = Default(None), + ) -> Callable[[CommandFunctionType], CommandFunctionType]: ... + def command( + self, + name: Optional[str] = None, + *, + cls: Optional[Type[TyperCommand]] = None, + context_settings: Optional[Dict[Any, Any]] = None, + help: Optional[str] = None, + epilog: Optional[str] = None, + short_help: Optional[str] = None, + options_metavar: str = "[OPTIONS]", + add_help_option: bool = True, + no_args_is_help: bool = False, + hidden: bool = False, + deprecated: bool = False, + rich_help_panel: Union[str, None] = Default(None), + ) -> Callable[[CommandFunctionType], CommandFunctionType]: ... + def add_typer( + self, + typer_instance: "Typer", + *, + name: Optional[str] = Default(None), + cls: Optional[Type[TyperGroup]] = Default(None), + invoke_without_command: bool = Default(False), + no_args_is_help: bool = Default(False), + subcommand_metavar: Optional[str] = Default(None), + chain: bool = Default(False), + result_callback: Optional[Callable[..., Any]] = Default(None), + context_settings: Optional[Dict[Any, Any]] = Default(None), + callback: Optional[Callable[..., Any]] = Default(None), + help: Optional[str] = Default(None), + epilog: Optional[str] = Default(None), + short_help: Optional[str] = Default(None), + options_metavar: str = Default("[OPTIONS]"), + add_help_option: bool = Default(True), + hidden: bool = Default(False), + deprecated: bool = Default(False), + rich_help_panel: Union[str, None] = Default(None), + ) -> None: ... diff --git a/typings/typer/models.pyi b/typings/typer/models.pyi new file mode 100644 index 0000000000..348504d574 --- /dev/null +++ b/typings/typer/models.pyi @@ -0,0 +1,6 @@ +from typing import Any, Callable, TypeVar + +CommandFunctionType = TypeVar("CommandFunctionType", bound=Callable[..., Any]) +DefaultType = TypeVar("DefaultType") + +def Default(value: DefaultType) -> DefaultType: ... diff --git a/typings/typer/params.pyi b/typings/typer/params.pyi new file mode 100644 index 0000000000..c5d731e507 --- /dev/null +++ b/typings/typer/params.pyi @@ -0,0 +1,4 @@ +from typing import Any + +def Argument(*args: Any, **kwargs: Any) -> Any: ... +def Option(*args: Any, **kwargs: Any) -> Any: ...
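To round off the typing stubs, here is a hypothetical usage sketch (not taken from the repository) showing how a callback such as `validators.typer_aad_guid` is typically attached to a `typer.Option`, so that an invalid value surfaces as click's `BadParameter`, as the tests in `tests/validators/test_typer_validators.py` expect. The command and option names below are illustrative only:

```python
# Hypothetical example: wiring a validator callback into a Typer option.
from typing import Annotated, Optional

import typer

from data_safe_haven import validators

app = typer.Typer()


@app.command()
def show_tenant(
    tenant_id: Annotated[
        Optional[str],
        typer.Option(
            help="Microsoft Entra (AAD) tenant GUID",
            callback=validators.typer_aad_guid,  # raises BadParameter on malformed GUIDs
        ),
    ] = None,
) -> None:
    # By this point the callback has either validated the GUID or passed through None.
    print(f"Tenant: {tenant_id}")


if __name__ == "__main__":
    app()  # e.g. `--tenant-id d5c5c439-1115-4cb6-ab50-b8e547b6c8dd`
```

The same pattern applies to `typer_azure_subscription_name`; both callbacks pass `None` through untouched, so optional CLI flags keep their default behaviour.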