diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 0000000..5510315
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,40 @@
+---
+name: Bug Report
+about: Help us diagnose and fix bugs in Crossplane Provider Confluent
+labels: bug
+---
+
+
+### What happened?
+
+
+
+### How can we reproduce it?
+
+
+### What environment did it happen in?
+Crossplane version:
+Crossplane Provider Confluent version:
+
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000..83ec864
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,24 @@
+---
+name: Feature Request
+about: Help us make Crossplane Provider Confluent more useful
+labels: enhancement
+---
+
+
+### What problem are you facing?
+
+
+### How could Crossplane help solve your problem?
+
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 0000000..7627d1a
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,35 @@
+
+
+### Description of your changes
+
+
+Fixes #
+
+I have:
+
+- [ ] Read and followed Crossplane's [contribution process].
+- [ ] Run `make reviewable test` to ensure this PR is ready for review.
+
+### How has this code been tested?
+
+
+
+[contribution process]: https://git.io/fj2m9
diff --git a/.github/workflows/backport.yaml b/.github/workflows/backport.yaml
new file mode 100644
index 0000000..8b850da
--- /dev/null
+++ b/.github/workflows/backport.yaml
@@ -0,0 +1,34 @@
+name: Backport
+
+on:
+  # NOTE(negz): This is a risky target, but we run this action only when and if
+  # a PR is closed, then filter down to specifically merged PRs. We also don't
+  # invoke any scripts, etc from within the repo. I believe the fact that we'll
+  # be able to review PRs before this runs makes this fairly safe.
+  # https://securitylab.github.com/research/github-actions-preventing-pwn-requests/
+  pull_request_target:
+    types: [closed]
+  # See also commands.yaml for the /backport triggered variant of this workflow.
+
+jobs:
+  # NOTE(negz): I tested many backport GitHub actions before landing on this
+  # one. Many do not support merge commits, or do not support pull requests with
+  # more than one commit. This one does. It also handily links backport PRs with
+  # new PRs, and provides commentary and instructions when it can't backport.
+  # The main gotchas with this action are that it _only_ supports merge commits,
+  # and that PRs _must_ be labelled before they're merged to trigger a backport.
+  open-pr:
+    runs-on: ubuntu-20.04
+    if: github.event.pull_request.merged
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+
+      - name: Open Backport PR
+        uses: zeebe-io/backport-action@v0.0.4
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          github_workspace: ${{ github.workspace }}
+          version: v0.0.4
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
new file mode 100644
index 0000000..8a1df8c
--- /dev/null
+++ b/.github/workflows/ci.yaml
@@ -0,0 +1,330 @@
+name: CI
+
+on:
+  push:
+    branches:
+      - master
+      - main # TODO: change default branch to master and remove this line
+      - release-*
+  pull_request: {}
+  workflow_dispatch: {}
+
+env:
+  # Common versions
+  GO_VERSION: '1.20'
+  GOLANGCI_VERSION: 'v1.52.2'
+  DOCKER_BUILDX_VERSION: 'v0.8.2'
+
+  # Common users.
We can't run a step 'if secrets.AWS_USR != ""' but we can run + # a step 'if env.AWS_USR' != ""', so we copy these to succinctly test whether + # credentials have been provided before trying to run steps that need them. + CONTRIB_DOCKER_USR: ${{ secrets.CONTRIB_DOCKER_USR }} + XPKG_ACCESS_ID: ${{ secrets.XPKG_ACCESS_ID }} + AWS_USR: ${{ secrets.AWS_USR }} + +jobs: + detect-noop: + runs-on: ubuntu-20.04 + outputs: + noop: ${{ steps.noop.outputs.should_skip }} + steps: + - name: Detect No-op Changes + id: noop + uses: fkirc/skip-duplicate-actions@v2.0.0 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + paths_ignore: '["**.md", "**.png", "**.jpg"]' + do_not_skip: '["workflow_dispatch", "schedule", "push"]' + + + lint: + runs-on: ubuntu-20.04 + needs: detect-noop + if: needs.detect-noop.outputs.noop != 'true' + + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + submodules: true + + - name: Setup Go + uses: actions/setup-go@v2 + with: + go-version: ${{ env.GO_VERSION }} + + - name: Find the Go Build Cache + id: go + run: echo "::set-output name=cache::$(make go.cachedir)" + + - name: Cache the Go Build Cache + uses: actions/cache@v2 + with: + path: ${{ steps.go.outputs.cache }} + key: ${{ runner.os }}-build-lint-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ runner.os }}-build-lint- + + - name: Cache Go Dependencies + uses: actions/cache@v2 + with: + path: .work/pkg + key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ runner.os }}-pkg- + + - name: Vendor Dependencies + run: make vendor vendor.check + + # We could run 'make lint' but we prefer this action because it leaves + # 'annotations' (i.e. it comments on PRs to point out linter violations). + - name: Lint + uses: golangci/golangci-lint-action@v3 + with: + version: ${{ env.GOLANGCI_VERSION }} + + check-diff: + runs-on: ubuntu-20.04 + needs: detect-noop + if: needs.detect-noop.outputs.noop != 'true' + + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + submodules: true + + - name: Setup Go + uses: actions/setup-go@v2 + with: + go-version: ${{ env.GO_VERSION }} + + - name: Find the Go Build Cache + id: go + run: echo "::set-output name=cache::$(make go.cachedir)" + + - name: Cache the Go Build Cache + uses: actions/cache@v2 + with: + path: ${{ steps.go.outputs.cache }} + key: ${{ runner.os }}-build-check-diff-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ runner.os }}-build-check-diff- + + - name: Cache Go Dependencies + uses: actions/cache@v2 + with: + path: .work/pkg + key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ runner.os }}-pkg- + + - name: Vendor Dependencies + run: make vendor vendor.check + + - name: Check Diff + run: make check-diff + + unit-tests: + runs-on: ubuntu-20.04 + needs: detect-noop + if: needs.detect-noop.outputs.noop != 'true' + + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + submodules: true + + - name: Fetch History + run: git fetch --prune --unshallow + + - name: Setup Go + uses: actions/setup-go@v2 + with: + go-version: ${{ env.GO_VERSION }} + + - name: Find the Go Build Cache + id: go + run: echo "::set-output name=cache::$(make go.cachedir)" + + - name: Cache the Go Build Cache + uses: actions/cache@v2 + with: + path: ${{ steps.go.outputs.cache }} + key: ${{ runner.os }}-build-unit-tests-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ runner.os }}-build-unit-tests- + + - name: Cache Go Dependencies + uses: actions/cache@v2 + with: + path: .work/pkg + key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') 
}} + restore-keys: ${{ runner.os }}-pkg- + + - name: Vendor Dependencies + run: make vendor vendor.check + + - name: Run Unit Tests + run: make -j2 test + + - name: Publish Unit Test Coverage + uses: codecov/codecov-action@v1 + with: + flags: unittests + file: _output/tests/linux_amd64/coverage.txt + + e2e-tests: + runs-on: ubuntu-20.04 + needs: detect-noop + if: needs.detect-noop.outputs.noop != 'true' + + steps: + - name: Setup QEMU + uses: docker/setup-qemu-action@v1 + with: + platforms: all + + - name: Setup Docker Buildx + uses: docker/setup-buildx-action@v1 + with: + version: ${{ env.DOCKER_BUILDX_VERSION }} + install: true + + - name: Checkout + uses: actions/checkout@v2 + with: + submodules: true + + - name: Fetch History + run: git fetch --prune --unshallow + + - name: Setup Go + uses: actions/setup-go@v2 + with: + go-version: ${{ env.GO_VERSION }} + + - name: Find the Go Build Cache + id: go + run: echo "::set-output name=cache::$(make go.cachedir)" + + - name: Cache the Go Build Cache + uses: actions/cache@v2 + with: + path: ${{ steps.go.outputs.cache }} + key: ${{ runner.os }}-build-e2e-tests-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ runner.os }}-build-e2e-tests- + + - name: Cache Go Dependencies + uses: actions/cache@v2 + with: + path: .work/pkg + key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-pkg- + + - name: Vendor Dependencies + run: make vendor vendor.check + + - name: Build Helm Chart + run: make -j2 build + env: + # We're using docker buildx, which doesn't actually load the images it + # builds by default. Specifying --load does so. + BUILD_ARGS: "--load" + + - name: Run E2E Tests + run: make e2e USE_HELM3=true + + publish-artifacts: + runs-on: ubuntu-20.04 + needs: detect-noop + if: needs.detect-noop.outputs.noop != 'true' + + steps: + - name: Setup QEMU + uses: docker/setup-qemu-action@v1 + with: + platforms: all + + - name: Setup Docker Buildx + uses: docker/setup-buildx-action@v1 + with: + version: ${{ env.DOCKER_BUILDX_VERSION }} + install: true + + - name: Checkout + uses: actions/checkout@v2 + with: + submodules: true + + - name: Fetch History + run: git fetch --prune --unshallow + + - name: Setup Go + uses: actions/setup-go@v2 + with: + go-version: ${{ env.GO_VERSION }} + + - name: Find the Go Build Cache + id: go + run: echo "::set-output name=cache::$(make go.cachedir)" + + - name: Cache the Go Build Cache + uses: actions/cache@v2 + with: + path: ${{ steps.go.outputs.cache }} + key: ${{ runner.os }}-build-publish-artifacts-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ runner.os }}-build-publish-artifacts- + + - name: Cache Go Dependencies + uses: actions/cache@v2 + with: + path: .work/pkg + key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ runner.os }}-pkg- + + - name: Vendor Dependencies + run: make vendor vendor.check + + - name: Build Artifacts + run: make -j2 build.all + env: + # We're using docker buildx, which doesn't actually load the images it + # builds by default. Specifying --load does so. 
+ BUILD_ARGS: "--load" + + - name: Publish Artifacts to GitHub + uses: actions/upload-artifact@v2 + with: + name: output + path: _output/** + + - name: Login to Docker + uses: docker/login-action@v1 + if: env.CONTRIB_DOCKER_USR != '' + with: + username: ${{ secrets.CONTRIB_DOCKER_USR }} + password: ${{ secrets.CONTRIB_DOCKER_PSW }} + + - name: Login to Upbound + uses: docker/login-action@v1 + if: env.XPKG_ACCESS_ID != '' + with: + registry: xpkg.upbound.io + username: ${{ secrets.XPKG_ACCESS_ID }} + password: ${{ secrets.XPKG_TOKEN }} + + - name: Publish Artifacts to S3 and Docker Hub + run: make -j2 publish BRANCH_NAME=${GITHUB_REF##*/} + if: env.AWS_USR != '' && env.CONTRIB_DOCKER_USR != '' + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_USR }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PSW }} + GIT_API_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Promote Artifacts in S3 and Docker Hub + if: github.ref == 'refs/heads/master' && env.AWS_USR != '' && env.CONTRIB_DOCKER_USR != '' + run: make -j2 promote + env: + BRANCH_NAME: master + CHANNEL: master + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_USR }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PSW }} + diff --git a/.github/workflows/commands.yaml b/.github/workflows/commands.yaml new file mode 100644 index 0000000..815838b --- /dev/null +++ b/.github/workflows/commands.yaml @@ -0,0 +1,92 @@ +name: Comment Commands + +on: issue_comment + +jobs: + points: + runs-on: ubuntu-20.04 + if: startsWith(github.event.comment.body, '/points') + + steps: + - name: Extract Command + id: command + uses: xt0rted/slash-command-action@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + command: points + reaction: "true" + reaction-type: "eyes" + allow-edits: "false" + permission-level: write + - name: Handle Command + uses: actions/github-script@v4 + env: + POINTS: ${{ steps.command.outputs.command-arguments }} + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const points = process.env.POINTS + + if (isNaN(parseInt(points))) { + console.log("Malformed command - expected '/points '") + github.reactions.createForIssueComment({ + owner: context.repo.owner, + repo: context.repo.repo, + comment_id: context.payload.comment.id, + content: "confused" + }) + return + } + const label = "points/" + points + + // Delete our needs-points-label label. + try { + await github.issues.deleteLabel({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + name: ['needs-points-label'] + }) + console.log("Deleted 'needs-points-label' label.") + } + catch(e) { + console.log("Label 'needs-points-label' probably didn't exist.") + } + + // Add our points label. + github.issues.addLabels({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + labels: [label] + }) + console.log("Added '" + label + "' label.") + + # NOTE(negz): See also backport.yml, which is the variant that triggers on PR + # merge rather than on comment. 
+ backport: + runs-on: ubuntu-20.04 + if: github.event.issue.pull_request && startsWith(github.event.comment.body, '/backport') + steps: + - name: Extract Command + id: command + uses: xt0rted/slash-command-action@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + command: backport + reaction: "true" + reaction-type: "eyes" + allow-edits: "false" + permission-level: write + + - name: Checkout + uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: Open Backport PR + uses: zeebe-io/backport-action@v0.0.4 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + github_workspace: ${{ github.workspace }} + version: v0.0.4 diff --git a/.github/workflows/promote.yaml b/.github/workflows/promote.yaml new file mode 100644 index 0000000..a8767e6 --- /dev/null +++ b/.github/workflows/promote.yaml @@ -0,0 +1,57 @@ +name: Promote + +on: + workflow_dispatch: + inputs: + version: + description: 'Release version (e.g. v0.1.0)' + required: true + channel: + description: 'Release channel' + required: true + default: 'alpha' + +env: + # Common versions + GO_VERSION: '1.18' + + # Common users. We can't run a step 'if secrets.AWS_USR != ""' but we can run + # a step 'if env.AWS_USR' != ""', so we copy these to succinctly test whether + # credentials have been provided before trying to run steps that need them. + CONTRIB_DOCKER_USR: ${{ secrets.CONTRIB_DOCKER_USR }} + AWS_USR: ${{ secrets.AWS_USR }} + +jobs: + promote-artifacts: + runs-on: ubuntu-20.04 + + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + submodules: true + + - name: Setup Go + uses: actions/setup-go@v2 + with: + go-version: ${{ env.GO_VERSION }} + + - name: Fetch History + run: git fetch --prune --unshallow + + - name: Login to Docker + uses: docker/login-action@v1 + if: env.CONTRIB_DOCKER_USR != '' + with: + username: ${{ secrets.CONTRIB_DOCKER_USR }} + password: ${{ secrets.CONTRIB_DOCKER_PSW }} + + - name: Promote Artifacts in S3 and Docker Hub + if: env.AWS_USR != '' && env.CONTRIB_DOCKER_USR != '' + run: make -j2 promote BRANCH_NAME=${GITHUB_REF##*/} + env: + VERSION: ${{ github.event.inputs.version }} + CHANNEL: ${{ github.event.inputs.channel }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_USR }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PSW }} + diff --git a/.github/workflows/tag.yaml b/.github/workflows/tag.yaml new file mode 100644 index 0000000..3b272ea --- /dev/null +++ b/.github/workflows/tag.yaml @@ -0,0 +1,26 @@ +name: Tag + +on: + workflow_dispatch: + inputs: + version: + description: 'Release version (e.g. v0.1.0)' + required: true + message: + description: 'Tag message' + required: true + +jobs: + create-tag: + runs-on: ubuntu-20.04 + + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Create Tag + uses: negz/create-tag@v1 + with: + version: ${{ github.event.inputs.version }} + message: ${{ github.event.inputs.message }} + token: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..18edcaa --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,3 @@ +## Code of Conduct + +Upjet is under [the Apache 2.0 license](LICENSE) with [notice](NOTICE). \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..b51c1dc --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2023 Upbound Inc. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
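The appendix above describes attaching the boilerplate notice "in the appropriate comment syntax for the file format". As a concrete sketch, here is that same notice wrapped in a Go block comment, the way source files in a repository like this one would carry it; the `package` clause is illustrative only:

```go
/*
Copyright 2023 Upbound Inc. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apis // illustrative package clause
```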
diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..56780f8 --- /dev/null +++ b/Makefile @@ -0,0 +1,159 @@ +# ==================================================================================== +# Setup Project + +PROJECT_NAME ?= provider-confluent +PROJECT_REPO ?= github.com/crossplane-contrib/$(PROJECT_NAME) + +export TERRAFORM_VERSION ?= 1.3.3 + +export TERRAFORM_PROVIDER_SOURCE := confluentinc/confluent +export TERRAFORM_PROVIDER_REPO := https://github.com/confluentinc/terraform-provider-confluent +export TERRAFORM_PROVIDER_VERSION := 1.28.0 +export TERRAFORM_PROVIDER_DOWNLOAD_NAME := terraform-provider-confluent +export TERRAFORM_NATIVE_PROVIDER_BINARY := terraform-provider-confluent_1.28.0 +export TERRAFORM_DOCS_PATH := docs/resources + +PLATFORMS ?= linux_amd64 linux_arm64 + +# kind-related versions +KIND_VERSION ?= v0.14.0 +KIND_NODE_IMAGE_TAG ?= v1.24.0 + +# -include will silently skip missing files, which allows us +# to load those files with a target in the Makefile. If only +# "include" was used, the make command would fail and refuse +# to run a target until the include commands succeeded. +-include build/makelib/common.mk + +# ==================================================================================== +# Setup Output + +-include build/makelib/output.mk + +# ==================================================================================== +# Setup Go + +# Set a sane default so that the nprocs calculation below is less noisy on the initial +# loading of this file +NPROCS ?= 1 + +# each of our test suites starts a kube-apiserver and running many test suites in +# parallel can lead to high CPU utilization. by default we reduce the parallelism +# to half the number of CPU cores. +GO_TEST_PARALLEL := $(shell echo $$(( $(NPROCS) / 2 ))) + +GO_STATIC_PACKAGES = $(GO_PROJECT)/cmd/provider +GO_LDFLAGS += -X $(GO_PROJECT)/pkg/version.Version=$(VERSION) +GO_SUBDIRS += cmd pkg apis +GO111MODULE = on +-include build/makelib/golang.mk + +# ==================================================================================== +# Setup Kubernetes tools + +UP_VERSION = v0.13.0 +UP_CHANNEL = stable +-include build/makelib/k8s_tools.mk + +# ==================================================================================== +# Setup Images + +IMAGES = provider-confluent +-include build/makelib/imagelight.mk + +# ==================================================================================== +# Setup XPKG + +XPKG_REG_ORGS ?= xpkg.upbound.io/crossplane-contrib index.docker.io/crossplanecontrib +# NOTE(hasheddan): skip promoting on xpkg.upbound.io as channel tags are +# inferred. +XPKG_REG_ORGS_NO_PROMOTE ?= xpkg.upbound.io/crossplane-contrib +XPKGS = provider-confluent +-include build/makelib/xpkg.mk + +# NOTE(hasheddan): we force image building to happen prior to xpkg build so that +# we ensure image is present in daemon. +xpkg.build.provider-confluent: do.build.images + +# ==================================================================================== +# Targets + +# run `make help` to see the targets and options + +# We want submodules to be set up the first time `make` is run. +# We manage the build/ folder and its Makefiles as a submodule. +# The first time `make` is run, the includes of build/*.mk files will +# all fail, and this target will be run. The next time, the default as defined +# by the includes will be run instead. +fallthrough: submodules + @echo Initial setup complete. Running make again . . . 
+ @make + +# Generate a coverage report for cobertura applying exclusions on +# - generated file +cobertura: + @cat $(GO_TEST_OUTPUT)/coverage.txt | \ + grep -v zz_generated.deepcopy | \ + $(GOCOVER_COBERTURA) > $(GO_TEST_OUTPUT)/cobertura-coverage.xml + +manifests: + @$(WARN) Deprecated. Please run make generate instead. + +# integration tests +e2e.run: test-integration + +# Run integration tests. +test-integration: $(KIND) $(KUBECTL) $(UP) $(HELM3) + @$(INFO) running integration tests using kind $(KIND_VERSION) + @KIND_NODE_IMAGE_TAG=${KIND_NODE_IMAGE_TAG} $(ROOT_DIR)/cluster/local/integration_tests.sh || $(FAIL) + @$(OK) integration tests passed + +# Update the submodules, such as the common build scripts. +submodules: + @git submodule sync + @git submodule update --init --recursive + +# NOTE(hasheddan): the build submodule currently overrides XDG_CACHE_HOME in +# order to force the Helm 3 to use the .work/helm directory. This causes Go on +# Linux machines to use that directory as the build cache as well. We should +# adjust this behavior in the build submodule because it is also causing Linux +# users to duplicate their build cache, but for now we just make it easier to +# identify its location in CI so that we cache between builds. +go.cachedir: + @go env GOCACHE + +# NOTE(hasheddan): we must ensure up is installed in tool cache prior to build +# as including the k8s_tools machinery prior to the xpkg machinery sets UP to +# point to tool cache. +build.init: $(UP) + +# This is for running out-of-cluster locally, and is for convenience. Running +# this make target will print out the command which was used. For more control, +# try running the binary directly with different arguments. +run: go.build + @$(INFO) Running Crossplane locally out-of-cluster . . . + @# To see other arguments that can be provided, run the command with --help instead + $(GO_OUT_DIR)/provider --debug + +.PHONY: cobertura manifests submodules fallthrough test-integration run crds.clean + +# ==================================================================================== +# Special Targets + +define CROSSPLANE_MAKE_HELP +Crossplane Targets: + cobertura Generate a coverage report for cobertura applying exclusions on generated files. + submodules Update the submodules, such as the common build scripts. + run Run crossplane locally, out-of-cluster. Useful for development. + +endef +# The reason CROSSPLANE_MAKE_HELP is used instead of CROSSPLANE_HELP is because the crossplane +# binary will try to use CROSSPLANE_HELP if it is set, and this is for something different. +export CROSSPLANE_MAKE_HELP + +crossplane.help: + @echo "$$CROSSPLANE_MAKE_HELP" + +help-special: crossplane.help + +.PHONY: crossplane.help help-special \ No newline at end of file diff --git a/OWNERS.md b/OWNERS.md new file mode 100644 index 0000000..599b40e --- /dev/null +++ b/OWNERS.md @@ -0,0 +1,13 @@ +# OWNERS + +This page lists all maintainers for **this** repository. Each repository in the [Upbound +organization](https://github.com/upbound/) will list their repository maintainers in their own +`OWNERS.md` file. + + +## Maintainers + +* Alper Ulucinar ([ulucinar](https://github.com/ulucinar)) +* Sergen Yalcin ([sergenyalcin](https://github.com/sergenyalcin)) + +See [CODEOWNERS](./CODEOWNERS) for automatic PR assignment. 
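Since the Makefile's `run` target above simply shells out to the built binary (`$(GO_OUT_DIR)/provider --debug`), the equivalent direct invocation looks like the sketch below. The concrete output path depends on `GO_OUT_DIR`, which the build submodule sets; the `_output/bin/linux_amd64` path here is an assumption for a typical Linux build:

```console
$ make run                                   # builds, then prints and runs the command below
$ _output/bin/linux_amd64/provider --debug   # run the binary directly for more control
$ _output/bin/linux_amd64/provider --help    # list the other supported arguments
```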
diff --git a/README.md b/README.md index 36c0fa7..0583499 100644 --- a/README.md +++ b/README.md @@ -1 +1,60 @@ -# provider-confluent \ No newline at end of file +# Provider Confluent + +`provider-confluent` is a [Crossplane](https://crossplane.io/) provider that +is built using [Upjet](https://github.com/upbound/upjet) code +generation tools and exposes XRM-conformant managed resources for the +Confluent API. + +## Getting Started + +Install the provider by using the following command after changing the image tag +to the [latest release](https://marketplace.upbound.io/providers/crossplane-contrib/provider-confluent): +``` +up ctp provider install crossplane-contrib/provider-confluent:v0.1.0 +``` + +Alternatively, you can use declarative installation: +``` +cat < +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,confluent} +type Key struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.owner)",message="owner is a required parameter" + Spec KeySpec `json:"spec"` + Status KeyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// KeyList contains a list of Keys +type KeyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Key `json:"items"` +} + +// Repository type metadata. +var ( + Key_Kind = "Key" + Key_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Key_Kind}.String() + Key_KindAPIVersion = Key_Kind + "." + CRDGroupVersion.String() + Key_GroupVersionKind = CRDGroupVersion.WithKind(Key_Kind) +) + +func init() { + SchemeBuilder.Register(&Key{}, &KeyList{}) +} diff --git a/apis/confluent/v1alpha1/zz_environment_types.go b/apis/confluent/v1alpha1/zz_environment_types.go new file mode 100755 index 0000000..d0c4133 --- /dev/null +++ b/apis/confluent/v1alpha1/zz_environment_types.go @@ -0,0 +1,82 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type EnvironmentObservation struct { + + // A human-readable name for the Environment. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Confluent Resource Name of the Environment. + ResourceName *string `json:"resourceName,omitempty" tf:"resource_name,omitempty"` +} + +type EnvironmentParameters struct { + + // A human-readable name for the Environment. 
+ // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` +} + +// EnvironmentSpec defines the desired state of Environment +type EnvironmentSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider EnvironmentParameters `json:"forProvider"` +} + +// EnvironmentStatus defines the observed state of Environment. +type EnvironmentStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider EnvironmentObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// Environment is the Schema for the Environments API. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,confluent} +type Environment struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.displayName)",message="displayName is a required parameter" + Spec EnvironmentSpec `json:"spec"` + Status EnvironmentStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// EnvironmentList contains a list of Environments +type EnvironmentList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Environment `json:"items"` +} + +// Repository type metadata. +var ( + Environment_Kind = "Environment" + Environment_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Environment_Kind}.String() + Environment_KindAPIVersion = Environment_Kind + "." + CRDGroupVersion.String() + Environment_GroupVersionKind = CRDGroupVersion.WithKind(Environment_Kind) +) + +func init() { + SchemeBuilder.Register(&Environment{}, &EnvironmentList{}) +} diff --git a/apis/confluent/v1alpha1/zz_generated.deepcopy.go b/apis/confluent/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..04c9339 --- /dev/null +++ b/apis/confluent/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,157 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Environment) DeepCopyInto(out *Environment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Environment. +func (in *Environment) DeepCopy() *Environment { + if in == nil { + return nil + } + out := new(Environment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Environment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvironmentList) DeepCopyInto(out *EnvironmentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Environment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentList. +func (in *EnvironmentList) DeepCopy() *EnvironmentList { + if in == nil { + return nil + } + out := new(EnvironmentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EnvironmentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvironmentObservation) DeepCopyInto(out *EnvironmentObservation) { + *out = *in + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ResourceName != nil { + in, out := &in.ResourceName, &out.ResourceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentObservation. +func (in *EnvironmentObservation) DeepCopy() *EnvironmentObservation { + if in == nil { + return nil + } + out := new(EnvironmentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvironmentParameters) DeepCopyInto(out *EnvironmentParameters) { + *out = *in + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentParameters. +func (in *EnvironmentParameters) DeepCopy() *EnvironmentParameters { + if in == nil { + return nil + } + out := new(EnvironmentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvironmentSpec) DeepCopyInto(out *EnvironmentSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentSpec. +func (in *EnvironmentSpec) DeepCopy() *EnvironmentSpec { + if in == nil { + return nil + } + out := new(EnvironmentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvironmentStatus) DeepCopyInto(out *EnvironmentStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentStatus. 
+func (in *EnvironmentStatus) DeepCopy() *EnvironmentStatus { + if in == nil { + return nil + } + out := new(EnvironmentStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/confluent/v1alpha1/zz_generated.managed.go b/apis/confluent/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..c78885e --- /dev/null +++ b/apis/confluent/v1alpha1/zz_generated.managed.go @@ -0,0 +1,84 @@ +/* +Copyright 2022 Upbound Inc. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Environment. +func (mg *Environment) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Environment. +func (mg *Environment) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicy of this Environment. +func (mg *Environment) GetManagementPolicy() xpv1.ManagementPolicy { + return mg.Spec.ManagementPolicy +} + +// GetProviderConfigReference of this Environment. +func (mg *Environment) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +/* +GetProviderReference of this Environment. +Deprecated: Use GetProviderConfigReference. +*/ +func (mg *Environment) GetProviderReference() *xpv1.Reference { + return mg.Spec.ProviderReference +} + +// GetPublishConnectionDetailsTo of this Environment. +func (mg *Environment) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Environment. +func (mg *Environment) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Environment. +func (mg *Environment) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Environment. +func (mg *Environment) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicy of this Environment. +func (mg *Environment) SetManagementPolicy(r xpv1.ManagementPolicy) { + mg.Spec.ManagementPolicy = r +} + +// SetProviderConfigReference of this Environment. +func (mg *Environment) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +/* +SetProviderReference of this Environment. +Deprecated: Use SetProviderConfigReference. +*/ +func (mg *Environment) SetProviderReference(r *xpv1.Reference) { + mg.Spec.ProviderReference = r +} + +// SetPublishConnectionDetailsTo of this Environment. +func (mg *Environment) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Environment. +func (mg *Environment) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/confluent/v1alpha1/zz_generated.managedlist.go b/apis/confluent/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..8ba540b --- /dev/null +++ b/apis/confluent/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +/* +Copyright 2022 Upbound Inc. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this EnvironmentList. 
+func (l *EnvironmentList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/confluent/v1alpha1/zz_generated_terraformed.go b/apis/confluent/v1alpha1/zz_generated_terraformed.go new file mode 100755 index 0000000..ddca3c5 --- /dev/null +++ b/apis/confluent/v1alpha1/zz_generated_terraformed.go @@ -0,0 +1,88 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/pkg/errors" + + "github.com/upbound/upjet/pkg/resource" + "github.com/upbound/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Environment +func (mg *Environment) GetTerraformResourceType() string { + return "confluent_environment" +} + +// GetConnectionDetailsMapping for this Environment +func (tr *Environment) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Environment +func (tr *Environment) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Environment +func (tr *Environment) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Environment +func (tr *Environment) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Environment +func (tr *Environment) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Environment +func (tr *Environment) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// LateInitialize this Environment using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Environment) LateInitialize(attrs []byte) (bool, error) { + params := &EnvironmentParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Environment) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/confluent/v1alpha1/zz_groupversion_info.go b/apis/confluent/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..bf3d4c4 --- /dev/null +++ b/apis/confluent/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,32 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +// +kubebuilder:object:generate=true +// +groupName=confluent.crossplane.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "confluent.crossplane.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/generate.go b/apis/generate.go new file mode 100644 index 0000000..7dcd77d --- /dev/null +++ b/apis/generate.go @@ -0,0 +1,39 @@ +//go:build generate +// +build generate + +/* +Copyright 2021 Upbound Inc. +*/ + +// NOTE: See the below link for details on what is happening here. +// https://github.com/golang/go/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module + +// Remove existing CRDs +//go:generate rm -rf ../package/crds + +// Remove generated files +//go:generate bash -c "find . -iname 'zz_*' ! -iname 'zz_generated.managed*.go' -delete" +//go:generate bash -c "find . -type d -empty -delete" +//go:generate bash -c "find ../internal/controller -iname 'zz_*' -delete" +//go:generate bash -c "find ../internal/controller -type d -empty -delete" +//go:generate rm -rf ../examples-generated + +// Generate documentation from Terraform docs. +//go:generate go run github.com/upbound/upjet/cmd/scraper -n ${TERRAFORM_PROVIDER_SOURCE} -r ../.work/${TERRAFORM_PROVIDER_SOURCE}/${TERRAFORM_DOCS_PATH} -o ../config/provider-metadata.yaml + +// Run Upjet generator +//go:generate go run ../cmd/generator/main.go .. + +// Generate deepcopy methodsets and CRD manifests +//go:generate go run -tags generate sigs.k8s.io/controller-tools/cmd/controller-gen object:headerFile=../hack/boilerplate.go.txt paths=./... crd:allowDangerousTypes=true,crdVersions=v1 output:artifacts:config=../package/crds + +// Generate crossplane-runtime methodsets (resource.Claim, etc) +//go:generate go run -tags generate github.com/crossplane/crossplane-tools/cmd/angryjet generate-methodsets --header-file=../hack/boilerplate.go.txt ./... + +package apis + +import ( + _ "sigs.k8s.io/controller-tools/cmd/controller-gen" //nolint:typecheck + + _ "github.com/crossplane/crossplane-tools/cmd/angryjet" //nolint:typecheck +) diff --git a/apis/kafka/v1alpha1/zz_acl_types.go b/apis/kafka/v1alpha1/zz_acl_types.go new file mode 100755 index 0000000..b9f6528 --- /dev/null +++ b/apis/kafka/v1alpha1/zz_acl_types.go @@ -0,0 +1,173 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ACLObservation struct { + + // The Cluster API Credentials. + Credentials []CredentialsParameters `json:"credentials,omitempty" tf:"credentials,omitempty"` + + // The host for the ACL. + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + KafkaCluster []KafkaClusterObservation `json:"kafkaCluster,omitempty" tf:"kafka_cluster,omitempty"` + + // The operation type for the ACL. 
+ Operation *string `json:"operation,omitempty" tf:"operation,omitempty"` + + // The pattern type for the ACL. + PatternType *string `json:"patternType,omitempty" tf:"pattern_type,omitempty"` + + // The permission for the ACL. + Permission *string `json:"permission,omitempty" tf:"permission,omitempty"` + + // The principal for the ACL. + Principal *string `json:"principal,omitempty" tf:"principal,omitempty"` + + // The resource name for the ACL. + ResourceName *string `json:"resourceName,omitempty" tf:"resource_name,omitempty"` + + // The type of the resource. + ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"` + + // The REST endpoint of the Kafka cluster (e.g., `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + RestEndpoint *string `json:"restEndpoint,omitempty" tf:"rest_endpoint,omitempty"` +} + +type ACLParameters struct { + + // The Cluster API Credentials. + // +kubebuilder:validation:Optional + Credentials []CredentialsParameters `json:"credentials,omitempty" tf:"credentials,omitempty"` + + // The host for the ACL. + // +kubebuilder:validation:Optional + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // +kubebuilder:validation:Optional + KafkaCluster []KafkaClusterParameters `json:"kafkaCluster,omitempty" tf:"kafka_cluster,omitempty"` + + // The operation type for the ACL. + // +kubebuilder:validation:Optional + Operation *string `json:"operation,omitempty" tf:"operation,omitempty"` + + // The pattern type for the ACL. + // +kubebuilder:validation:Optional + PatternType *string `json:"patternType,omitempty" tf:"pattern_type,omitempty"` + + // The permission for the ACL. + // +kubebuilder:validation:Optional + Permission *string `json:"permission,omitempty" tf:"permission,omitempty"` + + // The principal for the ACL. + // +kubebuilder:validation:Optional + Principal *string `json:"principal,omitempty" tf:"principal,omitempty"` + + // The resource name for the ACL. + // +kubebuilder:validation:Optional + ResourceName *string `json:"resourceName,omitempty" tf:"resource_name,omitempty"` + + // The type of the resource. + // +kubebuilder:validation:Optional + ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"` + + // The REST endpoint of the Kafka cluster (e.g., `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + // +kubebuilder:validation:Optional + RestEndpoint *string `json:"restEndpoint,omitempty" tf:"rest_endpoint,omitempty"` +} + +type CredentialsObservation struct { +} + +type CredentialsParameters struct { + + // The Cluster API Key for your Confluent Cloud cluster. + // +kubebuilder:validation:Required + KeySecretRef v1.SecretKeySelector `json:"keySecretRef" tf:"-"` + + // The Cluster API Secret for your Confluent Cloud cluster. + // +kubebuilder:validation:Required + SecretSecretRef v1.SecretKeySelector `json:"secretSecretRef" tf:"-"` +} + +type KafkaClusterObservation struct { + + // The Kafka cluster ID (e.g., `lkc-12345`). + ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type KafkaClusterParameters struct { + + // The Kafka cluster ID (e.g., `lkc-12345`). + // +kubebuilder:validation:Required + ID *string `json:"id" tf:"id,omitempty"` +} + +// ACLSpec defines the desired state of ACL +type ACLSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ACLParameters `json:"forProvider"` +} + +// ACLStatus defines the observed state of ACL. 
+type ACLStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ACLObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// ACL is the Schema for the ACLs API. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,confluent} +type ACL struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.host)",message="host is a required parameter" + // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.operation)",message="operation is a required parameter" + // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.patternType)",message="patternType is a required parameter" + // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.permission)",message="permission is a required parameter" + // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.principal)",message="principal is a required parameter" + // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.resourceName)",message="resourceName is a required parameter" + // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.resourceType)",message="resourceType is a required parameter" + Spec ACLSpec `json:"spec"` + Status ACLStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ACLList contains a list of ACLs +type ACLList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ACL `json:"items"` +} + +// Repository type metadata. +var ( + ACL_Kind = "ACL" + ACL_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ACL_Kind}.String() + ACL_KindAPIVersion = ACL_Kind + "." + CRDGroupVersion.String() + ACL_GroupVersionKind = CRDGroupVersion.WithKind(ACL_Kind) +) + +func init() { + SchemeBuilder.Register(&ACL{}, &ACLList{}) +} diff --git a/apis/kafka/v1alpha1/zz_cluster_types.go b/apis/kafka/v1alpha1/zz_cluster_types.go new file mode 100755 index 0000000..f0e3ea0 --- /dev/null +++ b/apis/kafka/v1alpha1/zz_cluster_types.go @@ -0,0 +1,206 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BasicObservation struct { +} + +type BasicParameters struct { +} + +type ClusterObservation struct { + + // API Version defines the schema version of this representation of a Kafka cluster. + APIVersion *string `json:"apiVersion,omitempty" tf:"api_version,omitempty"` + + // The availability zone configuration of the Kafka cluster. 
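+	// Confluent Cloud typically accepts SINGLE_ZONE or MULTI_ZONE here.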
+ Availability *string `json:"availability,omitempty" tf:"availability,omitempty"` + + Basic []BasicParameters `json:"basic,omitempty" tf:"basic,omitempty"` + + // The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster. + BootstrapEndpoint *string `json:"bootstrapEndpoint,omitempty" tf:"bootstrap_endpoint,omitempty"` + + // The cloud service provider that runs the Kafka cluster. + Cloud *string `json:"cloud,omitempty" tf:"cloud,omitempty"` + + Dedicated []DedicatedObservation `json:"dedicated,omitempty" tf:"dedicated,omitempty"` + + // The name of the Kafka cluster. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. + Environment []EnvironmentObservation `json:"environment,omitempty" tf:"environment,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Kind defines the object Kafka cluster represents. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts. + Network []NetworkObservation `json:"network,omitempty" tf:"network,omitempty"` + + // The Confluent Resource Name of the Kafka cluster suitable for confluent_role_binding's crn_pattern. + RbacCrn *string `json:"rbacCrn,omitempty" tf:"rbac_crn,omitempty"` + + // The cloud service provider region where the Kafka cluster is running. + Region *string `json:"region,omitempty" tf:"region,omitempty"` + + // The REST endpoint of the Kafka cluster. + RestEndpoint *string `json:"restEndpoint,omitempty" tf:"rest_endpoint,omitempty"` + + Standard []StandardParameters `json:"standard,omitempty" tf:"standard,omitempty"` +} + +type ClusterParameters struct { + + // The availability zone configuration of the Kafka cluster. + // +kubebuilder:validation:Optional + Availability *string `json:"availability,omitempty" tf:"availability,omitempty"` + + // +kubebuilder:validation:Optional + Basic []BasicParameters `json:"basic,omitempty" tf:"basic,omitempty"` + + // The cloud service provider that runs the Kafka cluster. + // +kubebuilder:validation:Optional + Cloud *string `json:"cloud,omitempty" tf:"cloud,omitempty"` + + // +kubebuilder:validation:Optional + Dedicated []DedicatedParameters `json:"dedicated,omitempty" tf:"dedicated,omitempty"` + + // The name of the Kafka cluster. + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. + // +kubebuilder:validation:Optional + Environment []EnvironmentParameters `json:"environment,omitempty" tf:"environment,omitempty"` + + // Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts. + // +kubebuilder:validation:Optional + Network []NetworkParameters `json:"network,omitempty" tf:"network,omitempty"` + + // The cloud service provider region where the Kafka cluster is running. + // +kubebuilder:validation:Optional + Region *string `json:"region,omitempty" tf:"region,omitempty"` + + // +kubebuilder:validation:Optional + Standard []StandardParameters `json:"standard,omitempty" tf:"standard,omitempty"` +} + +type DedicatedObservation struct { + + // The number of Confluent Kafka Units (CKUs) for Dedicated cluster types. 
MULTI_ZONE dedicated clusters must have at least two CKUs. + Cku *float64 `json:"cku,omitempty" tf:"cku,omitempty"` + + // The ID of the encryption key that is used to encrypt the data in the Kafka cluster. + EncryptionKey *string `json:"encryptionKey,omitempty" tf:"encryption_key,omitempty"` +} + +type DedicatedParameters struct { + + // The number of Confluent Kafka Units (CKUs) for Dedicated cluster types. MULTI_ZONE dedicated clusters must have at least two CKUs. + // +kubebuilder:validation:Required + Cku *float64 `json:"cku" tf:"cku,omitempty"` + + // The ID of the encryption key that is used to encrypt the data in the Kafka cluster. + // +kubebuilder:validation:Optional + EncryptionKey *string `json:"encryptionKey,omitempty" tf:"encryption_key,omitempty"` +} + +type EnvironmentObservation struct { + + // The unique identifier for the environment. + ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type EnvironmentParameters struct { + + // The unique identifier for the environment. + // +kubebuilder:validation:Required + ID *string `json:"id" tf:"id,omitempty"` +} + +type NetworkObservation struct { + + // The unique identifier for the network. + ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type NetworkParameters struct { + + // The unique identifier for the network. + // +kubebuilder:validation:Required + ID *string `json:"id" tf:"id,omitempty"` +} + +type StandardObservation struct { +} + +type StandardParameters struct { +} + +// ClusterSpec defines the desired state of Cluster +type ClusterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ClusterParameters `json:"forProvider"` +} + +// ClusterStatus defines the observed state of Cluster. +type ClusterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ClusterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// Cluster is the Schema for the Clusters API. 
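+// The printcolumn markers below surface readiness, sync status, external
+// name, and age in kubectl get output. A minimal manifest for a basic
+// cluster might look like the following sketch (illustrative values only):
+//
+//	apiVersion: kafka.crossplane.io/v1alpha1
+//	kind: Cluster
+//	metadata:
+//	  name: example
+//	spec:
+//	  forProvider:
+//	    displayName: example
+//	    availability: SINGLE_ZONE
+//	    cloud: AWS
+//	    region: us-east-1
+//	    basic: [{}]
+//	    environment:
+//	      - id: env-abc123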
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,confluent} +type Cluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.availability)",message="availability is a required parameter" + // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.cloud)",message="cloud is a required parameter" + // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.displayName)",message="displayName is a required parameter" + // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.environment)",message="environment is a required parameter" + // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.region)",message="region is a required parameter" + Spec ClusterSpec `json:"spec"` + Status ClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ClusterList contains a list of Clusters +type ClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Cluster `json:"items"` +} + +// Repository type metadata. +var ( + Cluster_Kind = "Cluster" + Cluster_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Cluster_Kind}.String() + Cluster_KindAPIVersion = Cluster_Kind + "." + CRDGroupVersion.String() + Cluster_GroupVersionKind = CRDGroupVersion.WithKind(Cluster_Kind) +) + +func init() { + SchemeBuilder.Register(&Cluster{}, &ClusterList{}) +} diff --git a/apis/kafka/v1alpha1/zz_clusterconfig_types.go b/apis/kafka/v1alpha1/zz_clusterconfig_types.go new file mode 100755 index 0000000..2dd590c --- /dev/null +++ b/apis/kafka/v1alpha1/zz_clusterconfig_types.go @@ -0,0 +1,125 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ClusterConfigCredentialsObservation struct { +} + +type ClusterConfigCredentialsParameters struct { + + // The Cluster API Key for your Confluent Cloud cluster. + // +kubebuilder:validation:Required + KeySecretRef v1.SecretKeySelector `json:"keySecretRef" tf:"-"` + + // The Cluster API Secret for your Confluent Cloud cluster. + // +kubebuilder:validation:Required + SecretSecretRef v1.SecretKeySelector `json:"secretSecretRef" tf:"-"` +} + +type ClusterConfigKafkaClusterObservation struct { + + // The Kafka cluster ID (e.g., `lkc-12345`). + ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type ClusterConfigKafkaClusterParameters struct { + + // The Kafka cluster ID (e.g., `lkc-12345`). 
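+	// This identifies the cluster whose settings the ClusterConfig manages.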
+ // +kubebuilder:validation:Required + ID *string `json:"id" tf:"id,omitempty"` +} + +type ClusterConfigObservation struct { + + // The custom cluster settings to set (e.g., `"num.partitions" = "8"`). + Config map[string]*string `json:"config,omitempty" tf:"config,omitempty"` + + // The Cluster API Credentials. + Credentials []ClusterConfigCredentialsParameters `json:"credentials,omitempty" tf:"credentials,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + KafkaCluster []ClusterConfigKafkaClusterObservation `json:"kafkaCluster,omitempty" tf:"kafka_cluster,omitempty"` + + // The REST endpoint of the Kafka cluster (e.g., `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + RestEndpoint *string `json:"restEndpoint,omitempty" tf:"rest_endpoint,omitempty"` +} + +type ClusterConfigParameters struct { + + // The custom cluster settings to set (e.g., `"num.partitions" = "8"`). + // +kubebuilder:validation:Optional + Config map[string]*string `json:"config,omitempty" tf:"config,omitempty"` + + // The Cluster API Credentials. + // +kubebuilder:validation:Optional + Credentials []ClusterConfigCredentialsParameters `json:"credentials,omitempty" tf:"credentials,omitempty"` + + // +kubebuilder:validation:Optional + KafkaCluster []ClusterConfigKafkaClusterParameters `json:"kafkaCluster,omitempty" tf:"kafka_cluster,omitempty"` + + // The REST endpoint of the Kafka cluster (e.g., `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + // +kubebuilder:validation:Optional + RestEndpoint *string `json:"restEndpoint,omitempty" tf:"rest_endpoint,omitempty"` +} + +// ClusterConfigSpec defines the desired state of ClusterConfig +type ClusterConfigSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ClusterConfigParameters `json:"forProvider"` +} + +// ClusterConfigStatus defines the observed state of ClusterConfig. +type ClusterConfigStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ClusterConfigObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// ClusterConfig is the Schema for the ClusterConfigs API. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,confluent} +type ClusterConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.config)",message="config is a required parameter" + Spec ClusterConfigSpec `json:"spec"` + Status ClusterConfigStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ClusterConfigList contains a list of ClusterConfigs +type ClusterConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterConfig `json:"items"` +} + +// Repository type metadata. +var ( + ClusterConfig_Kind = "ClusterConfig" + ClusterConfig_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ClusterConfig_Kind}.String() + ClusterConfig_KindAPIVersion = ClusterConfig_Kind + "." 
+ CRDGroupVersion.String() + ClusterConfig_GroupVersionKind = CRDGroupVersion.WithKind(ClusterConfig_Kind) +) + +func init() { + SchemeBuilder.Register(&ClusterConfig{}, &ClusterConfigList{}) +} diff --git a/apis/kafka/v1alpha1/zz_generated.deepcopy.go b/apis/kafka/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..c93ee95 --- /dev/null +++ b/apis/kafka/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,1027 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACL) DeepCopyInto(out *ACL) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACL. +func (in *ACL) DeepCopy() *ACL { + if in == nil { + return nil + } + out := new(ACL) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ACL) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACLList) DeepCopyInto(out *ACLList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ACL, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACLList. +func (in *ACLList) DeepCopy() *ACLList { + if in == nil { + return nil + } + out := new(ACLList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ACLList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
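+// Every pointer and slice field is cloned element by element so the copy
+// never aliases the receiver's memory.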
+func (in *ACLObservation) DeepCopyInto(out *ACLObservation) { + *out = *in + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = make([]CredentialsParameters, len(*in)) + copy(*out, *in) + } + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KafkaCluster != nil { + in, out := &in.KafkaCluster, &out.KafkaCluster + *out = make([]KafkaClusterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Operation != nil { + in, out := &in.Operation, &out.Operation + *out = new(string) + **out = **in + } + if in.PatternType != nil { + in, out := &in.PatternType, &out.PatternType + *out = new(string) + **out = **in + } + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = new(string) + **out = **in + } + if in.Principal != nil { + in, out := &in.Principal, &out.Principal + *out = new(string) + **out = **in + } + if in.ResourceName != nil { + in, out := &in.ResourceName, &out.ResourceName + *out = new(string) + **out = **in + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } + if in.RestEndpoint != nil { + in, out := &in.RestEndpoint, &out.RestEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACLObservation. +func (in *ACLObservation) DeepCopy() *ACLObservation { + if in == nil { + return nil + } + out := new(ACLObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACLParameters) DeepCopyInto(out *ACLParameters) { + *out = *in + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = make([]CredentialsParameters, len(*in)) + copy(*out, *in) + } + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.KafkaCluster != nil { + in, out := &in.KafkaCluster, &out.KafkaCluster + *out = make([]KafkaClusterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Operation != nil { + in, out := &in.Operation, &out.Operation + *out = new(string) + **out = **in + } + if in.PatternType != nil { + in, out := &in.PatternType, &out.PatternType + *out = new(string) + **out = **in + } + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = new(string) + **out = **in + } + if in.Principal != nil { + in, out := &in.Principal, &out.Principal + *out = new(string) + **out = **in + } + if in.ResourceName != nil { + in, out := &in.ResourceName, &out.ResourceName + *out = new(string) + **out = **in + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } + if in.RestEndpoint != nil { + in, out := &in.RestEndpoint, &out.RestEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACLParameters. +func (in *ACLParameters) DeepCopy() *ACLParameters { + if in == nil { + return nil + } + out := new(ACLParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ACLSpec) DeepCopyInto(out *ACLSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACLSpec. +func (in *ACLSpec) DeepCopy() *ACLSpec { + if in == nil { + return nil + } + out := new(ACLSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACLStatus) DeepCopyInto(out *ACLStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACLStatus. +func (in *ACLStatus) DeepCopy() *ACLStatus { + if in == nil { + return nil + } + out := new(ACLStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicObservation) DeepCopyInto(out *BasicObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicObservation. +func (in *BasicObservation) DeepCopy() *BasicObservation { + if in == nil { + return nil + } + out := new(BasicObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicParameters) DeepCopyInto(out *BasicParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicParameters. +func (in *BasicParameters) DeepCopy() *BasicParameters { + if in == nil { + return nil + } + out := new(BasicParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Cluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterConfig) DeepCopyInto(out *ClusterConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfig. +func (in *ClusterConfig) DeepCopy() *ClusterConfig { + if in == nil { + return nil + } + out := new(ClusterConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
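+// Implementing DeepCopyObject satisfies runtime.Object, which lets the
+// controller-runtime scheme and client handle this type generically.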
+func (in *ClusterConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterConfigCredentialsObservation) DeepCopyInto(out *ClusterConfigCredentialsObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigCredentialsObservation. +func (in *ClusterConfigCredentialsObservation) DeepCopy() *ClusterConfigCredentialsObservation { + if in == nil { + return nil + } + out := new(ClusterConfigCredentialsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterConfigCredentialsParameters) DeepCopyInto(out *ClusterConfigCredentialsParameters) { + *out = *in + out.KeySecretRef = in.KeySecretRef + out.SecretSecretRef = in.SecretSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigCredentialsParameters. +func (in *ClusterConfigCredentialsParameters) DeepCopy() *ClusterConfigCredentialsParameters { + if in == nil { + return nil + } + out := new(ClusterConfigCredentialsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterConfigKafkaClusterObservation) DeepCopyInto(out *ClusterConfigKafkaClusterObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigKafkaClusterObservation. +func (in *ClusterConfigKafkaClusterObservation) DeepCopy() *ClusterConfigKafkaClusterObservation { + if in == nil { + return nil + } + out := new(ClusterConfigKafkaClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterConfigKafkaClusterParameters) DeepCopyInto(out *ClusterConfigKafkaClusterParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigKafkaClusterParameters. +func (in *ClusterConfigKafkaClusterParameters) DeepCopy() *ClusterConfigKafkaClusterParameters { + if in == nil { + return nil + } + out := new(ClusterConfigKafkaClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterConfigList) DeepCopyInto(out *ClusterConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigList. 
+func (in *ClusterConfigList) DeepCopy() *ClusterConfigList { + if in == nil { + return nil + } + out := new(ClusterConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterConfigObservation) DeepCopyInto(out *ClusterConfigObservation) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = make([]ClusterConfigCredentialsParameters, len(*in)) + copy(*out, *in) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KafkaCluster != nil { + in, out := &in.KafkaCluster, &out.KafkaCluster + *out = make([]ClusterConfigKafkaClusterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RestEndpoint != nil { + in, out := &in.RestEndpoint, &out.RestEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigObservation. +func (in *ClusterConfigObservation) DeepCopy() *ClusterConfigObservation { + if in == nil { + return nil + } + out := new(ClusterConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterConfigParameters) DeepCopyInto(out *ClusterConfigParameters) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = make([]ClusterConfigCredentialsParameters, len(*in)) + copy(*out, *in) + } + if in.KafkaCluster != nil { + in, out := &in.KafkaCluster, &out.KafkaCluster + *out = make([]ClusterConfigKafkaClusterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RestEndpoint != nil { + in, out := &in.RestEndpoint, &out.RestEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigParameters. +func (in *ClusterConfigParameters) DeepCopy() *ClusterConfigParameters { + if in == nil { + return nil + } + out := new(ClusterConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterConfigSpec) DeepCopyInto(out *ClusterConfigSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigSpec. 
+func (in *ClusterConfigSpec) DeepCopy() *ClusterConfigSpec { + if in == nil { + return nil + } + out := new(ClusterConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterConfigStatus) DeepCopyInto(out *ClusterConfigStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigStatus. +func (in *ClusterConfigStatus) DeepCopy() *ClusterConfigStatus { + if in == nil { + return nil + } + out := new(ClusterConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterList) DeepCopyInto(out *ClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Cluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList. +func (in *ClusterList) DeepCopy() *ClusterList { + if in == nil { + return nil + } + out := new(ClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterObservation) DeepCopyInto(out *ClusterObservation) { + *out = *in + if in.APIVersion != nil { + in, out := &in.APIVersion, &out.APIVersion + *out = new(string) + **out = **in + } + if in.Availability != nil { + in, out := &in.Availability, &out.Availability + *out = new(string) + **out = **in + } + if in.Basic != nil { + in, out := &in.Basic, &out.Basic + *out = make([]BasicParameters, len(*in)) + copy(*out, *in) + } + if in.BootstrapEndpoint != nil { + in, out := &in.BootstrapEndpoint, &out.BootstrapEndpoint + *out = new(string) + **out = **in + } + if in.Cloud != nil { + in, out := &in.Cloud, &out.Cloud + *out = new(string) + **out = **in + } + if in.Dedicated != nil { + in, out := &in.Dedicated, &out.Dedicated + *out = make([]DedicatedObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = make([]EnvironmentObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = make([]NetworkObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RbacCrn != nil { + in, out := &in.RbacCrn, &out.RbacCrn + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RestEndpoint != nil { + in, out := &in.RestEndpoint, &out.RestEndpoint + *out = new(string) + **out = **in + } + if in.Standard != nil { + in, out := &in.Standard, &out.Standard + *out = make([]StandardParameters, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterObservation. +func (in *ClusterObservation) DeepCopy() *ClusterObservation { + if in == nil { + return nil + } + out := new(ClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterParameters) DeepCopyInto(out *ClusterParameters) { + *out = *in + if in.Availability != nil { + in, out := &in.Availability, &out.Availability + *out = new(string) + **out = **in + } + if in.Basic != nil { + in, out := &in.Basic, &out.Basic + *out = make([]BasicParameters, len(*in)) + copy(*out, *in) + } + if in.Cloud != nil { + in, out := &in.Cloud, &out.Cloud + *out = new(string) + **out = **in + } + if in.Dedicated != nil { + in, out := &in.Dedicated, &out.Dedicated + *out = make([]DedicatedParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = make([]EnvironmentParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = make([]NetworkParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Standard != nil { + in, out := &in.Standard, &out.Standard + *out = make([]StandardParameters, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterParameters. +func (in *ClusterParameters) DeepCopy() *ClusterParameters { + if in == nil { + return nil + } + out := new(ClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. +func (in *ClusterSpec) DeepCopy() *ClusterSpec { + if in == nil { + return nil + } + out := new(ClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. +func (in *ClusterStatus) DeepCopy() *ClusterStatus { + if in == nil { + return nil + } + out := new(ClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CredentialsObservation) DeepCopyInto(out *CredentialsObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CredentialsObservation. +func (in *CredentialsObservation) DeepCopy() *CredentialsObservation { + if in == nil { + return nil + } + out := new(CredentialsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CredentialsParameters) DeepCopyInto(out *CredentialsParameters) { + *out = *in + out.KeySecretRef = in.KeySecretRef + out.SecretSecretRef = in.SecretSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CredentialsParameters. +func (in *CredentialsParameters) DeepCopy() *CredentialsParameters { + if in == nil { + return nil + } + out := new(CredentialsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DedicatedObservation) DeepCopyInto(out *DedicatedObservation) { + *out = *in + if in.Cku != nil { + in, out := &in.Cku, &out.Cku + *out = new(float64) + **out = **in + } + if in.EncryptionKey != nil { + in, out := &in.EncryptionKey, &out.EncryptionKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DedicatedObservation. +func (in *DedicatedObservation) DeepCopy() *DedicatedObservation { + if in == nil { + return nil + } + out := new(DedicatedObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DedicatedParameters) DeepCopyInto(out *DedicatedParameters) { + *out = *in + if in.Cku != nil { + in, out := &in.Cku, &out.Cku + *out = new(float64) + **out = **in + } + if in.EncryptionKey != nil { + in, out := &in.EncryptionKey, &out.EncryptionKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DedicatedParameters. +func (in *DedicatedParameters) DeepCopy() *DedicatedParameters { + if in == nil { + return nil + } + out := new(DedicatedParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvironmentObservation) DeepCopyInto(out *EnvironmentObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentObservation. +func (in *EnvironmentObservation) DeepCopy() *EnvironmentObservation { + if in == nil { + return nil + } + out := new(EnvironmentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvironmentParameters) DeepCopyInto(out *EnvironmentParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentParameters. +func (in *EnvironmentParameters) DeepCopy() *EnvironmentParameters { + if in == nil { + return nil + } + out := new(EnvironmentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterObservation) DeepCopyInto(out *KafkaClusterObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterObservation. 
+func (in *KafkaClusterObservation) DeepCopy() *KafkaClusterObservation { + if in == nil { + return nil + } + out := new(KafkaClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterParameters) DeepCopyInto(out *KafkaClusterParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterParameters. +func (in *KafkaClusterParameters) DeepCopy() *KafkaClusterParameters { + if in == nil { + return nil + } + out := new(KafkaClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkObservation) DeepCopyInto(out *NetworkObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkObservation. +func (in *NetworkObservation) DeepCopy() *NetworkObservation { + if in == nil { + return nil + } + out := new(NetworkObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkParameters) DeepCopyInto(out *NetworkParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkParameters. +func (in *NetworkParameters) DeepCopy() *NetworkParameters { + if in == nil { + return nil + } + out := new(NetworkParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StandardObservation) DeepCopyInto(out *StandardObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StandardObservation. +func (in *StandardObservation) DeepCopy() *StandardObservation { + if in == nil { + return nil + } + out := new(StandardObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StandardParameters) DeepCopyInto(out *StandardParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StandardParameters. +func (in *StandardParameters) DeepCopy() *StandardParameters { + if in == nil { + return nil + } + out := new(StandardParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/kafka/v1alpha1/zz_generated.managed.go b/apis/kafka/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..6b0d0a7 --- /dev/null +++ b/apis/kafka/v1alpha1/zz_generated.managed.go @@ -0,0 +1,236 @@ +/* +Copyright 2022 Upbound Inc. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this ACL. +func (mg *ACL) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ACL. 
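+// The deletion policy controls whether the external Confluent resource is
+// deleted or orphaned when the managed resource is removed.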
+func (mg *ACL) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicy of this ACL. +func (mg *ACL) GetManagementPolicy() xpv1.ManagementPolicy { + return mg.Spec.ManagementPolicy +} + +// GetProviderConfigReference of this ACL. +func (mg *ACL) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +/* +GetProviderReference of this ACL. +Deprecated: Use GetProviderConfigReference. +*/ +func (mg *ACL) GetProviderReference() *xpv1.Reference { + return mg.Spec.ProviderReference +} + +// GetPublishConnectionDetailsTo of this ACL. +func (mg *ACL) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ACL. +func (mg *ACL) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ACL. +func (mg *ACL) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ACL. +func (mg *ACL) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicy of this ACL. +func (mg *ACL) SetManagementPolicy(r xpv1.ManagementPolicy) { + mg.Spec.ManagementPolicy = r +} + +// SetProviderConfigReference of this ACL. +func (mg *ACL) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +/* +SetProviderReference of this ACL. +Deprecated: Use SetProviderConfigReference. +*/ +func (mg *ACL) SetProviderReference(r *xpv1.Reference) { + mg.Spec.ProviderReference = r +} + +// SetPublishConnectionDetailsTo of this ACL. +func (mg *ACL) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ACL. +func (mg *ACL) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Cluster. +func (mg *Cluster) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Cluster. +func (mg *Cluster) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicy of this Cluster. +func (mg *Cluster) GetManagementPolicy() xpv1.ManagementPolicy { + return mg.Spec.ManagementPolicy +} + +// GetProviderConfigReference of this Cluster. +func (mg *Cluster) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +/* +GetProviderReference of this Cluster. +Deprecated: Use GetProviderConfigReference. +*/ +func (mg *Cluster) GetProviderReference() *xpv1.Reference { + return mg.Spec.ProviderReference +} + +// GetPublishConnectionDetailsTo of this Cluster. +func (mg *Cluster) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Cluster. +func (mg *Cluster) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Cluster. +func (mg *Cluster) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Cluster. +func (mg *Cluster) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicy of this Cluster. 
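+// A management policy of ObserveOnly, for instance, imports the external
+// resource without mutating it; the CEL rules on the Cluster type relax the
+// required parameters accordingly.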
+func (mg *Cluster) SetManagementPolicy(r xpv1.ManagementPolicy) { + mg.Spec.ManagementPolicy = r +} + +// SetProviderConfigReference of this Cluster. +func (mg *Cluster) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +/* +SetProviderReference of this Cluster. +Deprecated: Use SetProviderConfigReference. +*/ +func (mg *Cluster) SetProviderReference(r *xpv1.Reference) { + mg.Spec.ProviderReference = r +} + +// SetPublishConnectionDetailsTo of this Cluster. +func (mg *Cluster) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Cluster. +func (mg *Cluster) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ClusterConfig. +func (mg *ClusterConfig) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ClusterConfig. +func (mg *ClusterConfig) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicy of this ClusterConfig. +func (mg *ClusterConfig) GetManagementPolicy() xpv1.ManagementPolicy { + return mg.Spec.ManagementPolicy +} + +// GetProviderConfigReference of this ClusterConfig. +func (mg *ClusterConfig) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +/* +GetProviderReference of this ClusterConfig. +Deprecated: Use GetProviderConfigReference. +*/ +func (mg *ClusterConfig) GetProviderReference() *xpv1.Reference { + return mg.Spec.ProviderReference +} + +// GetPublishConnectionDetailsTo of this ClusterConfig. +func (mg *ClusterConfig) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ClusterConfig. +func (mg *ClusterConfig) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ClusterConfig. +func (mg *ClusterConfig) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ClusterConfig. +func (mg *ClusterConfig) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicy of this ClusterConfig. +func (mg *ClusterConfig) SetManagementPolicy(r xpv1.ManagementPolicy) { + mg.Spec.ManagementPolicy = r +} + +// SetProviderConfigReference of this ClusterConfig. +func (mg *ClusterConfig) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +/* +SetProviderReference of this ClusterConfig. +Deprecated: Use SetProviderConfigReference. +*/ +func (mg *ClusterConfig) SetProviderReference(r *xpv1.Reference) { + mg.Spec.ProviderReference = r +} + +// SetPublishConnectionDetailsTo of this ClusterConfig. +func (mg *ClusterConfig) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ClusterConfig. 
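+// The referenced Kubernetes Secret is where Crossplane writes any connection
+// details exposed by this resource.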
+func (mg *ClusterConfig) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/kafka/v1alpha1/zz_generated.managedlist.go b/apis/kafka/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..7dbbf9c --- /dev/null +++ b/apis/kafka/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,35 @@ +/* +Copyright 2022 Upbound Inc. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ACLList. +func (l *ACLList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ClusterConfigList. +func (l *ClusterConfigList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ClusterList. +func (l *ClusterList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/kafka/v1alpha1/zz_generated_terraformed.go b/apis/kafka/v1alpha1/zz_generated_terraformed.go new file mode 100755 index 0000000..56120ab --- /dev/null +++ b/apis/kafka/v1alpha1/zz_generated_terraformed.go @@ -0,0 +1,236 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/pkg/errors" + + "github.com/upbound/upjet/pkg/resource" + "github.com/upbound/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ACL +func (mg *ACL) GetTerraformResourceType() string { + return "confluent_kafka_acl" +} + +// GetConnectionDetailsMapping for this ACL +func (tr *ACL) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"credentials[*].key": "spec.forProvider.credentials[*].keySecretRef", "credentials[*].secret": "spec.forProvider.credentials[*].secretSecretRef"} +} + +// GetObservation of this ACL +func (tr *ACL) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ACL +func (tr *ACL) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ACL +func (tr *ACL) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ACL +func (tr *ACL) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ACL +func (tr *ACL) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// LateInitialize this ACL using its observed tfState. +// returns True if there are any spec changes for the resource. 
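+// Late initialization back-fills unset spec fields with values the provider
+// assigned server-side, so defaults observed in the Terraform state survive
+// in the desired state.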
+func (tr *ACL) LateInitialize(attrs []byte) (bool, error) { + params := &ACLParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ACL) GetTerraformSchemaVersion() int { + return 2 +} + +// GetTerraformResourceType returns Terraform resource type for this Cluster +func (mg *Cluster) GetTerraformResourceType() string { + return "confluent_kafka_cluster" +} + +// GetConnectionDetailsMapping for this Cluster +func (tr *Cluster) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Cluster +func (tr *Cluster) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Cluster +func (tr *Cluster) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Cluster +func (tr *Cluster) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Cluster +func (tr *Cluster) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Cluster +func (tr *Cluster) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// LateInitialize this Cluster using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Cluster) LateInitialize(attrs []byte) (bool, error) { + params := &ClusterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
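+	// Copy server-assigned defaults from the observed Terraform state into
+	// any unset spec fields, reporting whether the spec changed.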
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Cluster) GetTerraformSchemaVersion() int { + return 1 +} + +// GetTerraformResourceType returns Terraform resource type for this ClusterConfig +func (mg *ClusterConfig) GetTerraformResourceType() string { + return "confluent_kafka_cluster_config" +} + +// GetConnectionDetailsMapping for this ClusterConfig +func (tr *ClusterConfig) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"credentials[*].key": "spec.forProvider.credentials[*].keySecretRef", "credentials[*].secret": "spec.forProvider.credentials[*].secretSecretRef"} +} + +// GetObservation of this ClusterConfig +func (tr *ClusterConfig) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ClusterConfig +func (tr *ClusterConfig) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ClusterConfig +func (tr *ClusterConfig) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ClusterConfig +func (tr *ClusterConfig) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ClusterConfig +func (tr *ClusterConfig) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// LateInitialize this ClusterConfig using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ClusterConfig) LateInitialize(attrs []byte) (bool, error) { + params := &ClusterConfigParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ClusterConfig) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/kafka/v1alpha1/zz_groupversion_info.go b/apis/kafka/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..9408c3d --- /dev/null +++ b/apis/kafka/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,32 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=kafka.crossplane.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "kafka.crossplane.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/role/v1alpha1/zz_binding_types.go b/apis/role/v1alpha1/zz_binding_types.go new file mode 100755 index 0000000..2be0254 --- /dev/null +++ b/apis/role/v1alpha1/zz_binding_types.go @@ -0,0 +1,95 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BindingObservation struct { + + // A CRN that specifies the scope and resource patterns necessary for the role to bind. + CrnPattern *string `json:"crnPattern,omitempty" tf:"crn_pattern,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The principal User to bind the role to. + Principal *string `json:"principal,omitempty" tf:"principal,omitempty"` + + // The name of the role to bind to the principal. + RoleName *string `json:"roleName,omitempty" tf:"role_name,omitempty"` +} + +type BindingParameters struct { + + // A CRN that specifies the scope and resource patterns necessary for the role to bind. + // +kubebuilder:validation:Optional + CrnPattern *string `json:"crnPattern,omitempty" tf:"crn_pattern,omitempty"` + + // The principal User to bind the role to. + // +kubebuilder:validation:Optional + Principal *string `json:"principal,omitempty" tf:"principal,omitempty"` + + // The name of the role to bind to the principal. + // +kubebuilder:validation:Optional + RoleName *string `json:"roleName,omitempty" tf:"role_name,omitempty"` +} + +// BindingSpec defines the desired state of Binding +type BindingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BindingParameters `json:"forProvider"` +} + +// BindingStatus defines the observed state of Binding. +type BindingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BindingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// Binding is the Schema for the Bindings API. 
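+//
+// An illustrative manifest (editor's sketch, not generator output; the name,
+// CRN, principal, and role values below are hypothetical) shows how a Binding
+// is typically declared, using the forProvider fields defined above:
+//
+//  apiVersion: role.crossplane.io/v1alpha1
+//  kind: Binding
+//  metadata:
+//    name: example-binding
+//  spec:
+//    forProvider:
+//      crnPattern: "crn://confluent.cloud/organization=..."
+//      principal: "User:sa-example"
+//      roleName: "CloudClusterAdmin"
+//    providerConfigRef:
+//      name: default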
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,confluent} +type Binding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.crnPattern)",message="crnPattern is a required parameter" + // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.principal)",message="principal is a required parameter" + // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.roleName)",message="roleName is a required parameter" + Spec BindingSpec `json:"spec"` + Status BindingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BindingList contains a list of Bindings +type BindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Binding `json:"items"` +} + +// Repository type metadata. +var ( + Binding_Kind = "Binding" + Binding_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Binding_Kind}.String() + Binding_KindAPIVersion = Binding_Kind + "." + CRDGroupVersion.String() + Binding_GroupVersionKind = CRDGroupVersion.WithKind(Binding_Kind) +) + +func init() { + SchemeBuilder.Register(&Binding{}, &BindingList{}) +} diff --git a/apis/role/v1alpha1/zz_generated.deepcopy.go b/apis/role/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..ecfbdba --- /dev/null +++ b/apis/role/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,172 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Binding) DeepCopyInto(out *Binding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Binding. +func (in *Binding) DeepCopy() *Binding { + if in == nil { + return nil + } + out := new(Binding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Binding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BindingList) DeepCopyInto(out *BindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Binding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BindingList. +func (in *BindingList) DeepCopy() *BindingList { + if in == nil { + return nil + } + out := new(BindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BindingObservation) DeepCopyInto(out *BindingObservation) { + *out = *in + if in.CrnPattern != nil { + in, out := &in.CrnPattern, &out.CrnPattern + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Principal != nil { + in, out := &in.Principal, &out.Principal + *out = new(string) + **out = **in + } + if in.RoleName != nil { + in, out := &in.RoleName, &out.RoleName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BindingObservation. +func (in *BindingObservation) DeepCopy() *BindingObservation { + if in == nil { + return nil + } + out := new(BindingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BindingParameters) DeepCopyInto(out *BindingParameters) { + *out = *in + if in.CrnPattern != nil { + in, out := &in.CrnPattern, &out.CrnPattern + *out = new(string) + **out = **in + } + if in.Principal != nil { + in, out := &in.Principal, &out.Principal + *out = new(string) + **out = **in + } + if in.RoleName != nil { + in, out := &in.RoleName, &out.RoleName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BindingParameters. +func (in *BindingParameters) DeepCopy() *BindingParameters { + if in == nil { + return nil + } + out := new(BindingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BindingSpec) DeepCopyInto(out *BindingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BindingSpec. +func (in *BindingSpec) DeepCopy() *BindingSpec { + if in == nil { + return nil + } + out := new(BindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BindingStatus) DeepCopyInto(out *BindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BindingStatus. 
+func (in *BindingStatus) DeepCopy() *BindingStatus { + if in == nil { + return nil + } + out := new(BindingStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/role/v1alpha1/zz_generated.managed.go b/apis/role/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..b28e74b --- /dev/null +++ b/apis/role/v1alpha1/zz_generated.managed.go @@ -0,0 +1,84 @@ +/* +Copyright 2022 Upbound Inc. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Binding. +func (mg *Binding) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Binding. +func (mg *Binding) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicy of this Binding. +func (mg *Binding) GetManagementPolicy() xpv1.ManagementPolicy { + return mg.Spec.ManagementPolicy +} + +// GetProviderConfigReference of this Binding. +func (mg *Binding) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +/* +GetProviderReference of this Binding. +Deprecated: Use GetProviderConfigReference. +*/ +func (mg *Binding) GetProviderReference() *xpv1.Reference { + return mg.Spec.ProviderReference +} + +// GetPublishConnectionDetailsTo of this Binding. +func (mg *Binding) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Binding. +func (mg *Binding) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Binding. +func (mg *Binding) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Binding. +func (mg *Binding) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicy of this Binding. +func (mg *Binding) SetManagementPolicy(r xpv1.ManagementPolicy) { + mg.Spec.ManagementPolicy = r +} + +// SetProviderConfigReference of this Binding. +func (mg *Binding) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +/* +SetProviderReference of this Binding. +Deprecated: Use SetProviderConfigReference. +*/ +func (mg *Binding) SetProviderReference(r *xpv1.Reference) { + mg.Spec.ProviderReference = r +} + +// SetPublishConnectionDetailsTo of this Binding. +func (mg *Binding) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Binding. +func (mg *Binding) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/role/v1alpha1/zz_generated.managedlist.go b/apis/role/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..68e9d26 --- /dev/null +++ b/apis/role/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +/* +Copyright 2022 Upbound Inc. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this BindingList. 
+func (l *BindingList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/role/v1alpha1/zz_generated_terraformed.go b/apis/role/v1alpha1/zz_generated_terraformed.go new file mode 100755 index 0000000..37d6ff2 --- /dev/null +++ b/apis/role/v1alpha1/zz_generated_terraformed.go @@ -0,0 +1,88 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/pkg/errors" + + "github.com/upbound/upjet/pkg/resource" + "github.com/upbound/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Binding +func (mg *Binding) GetTerraformResourceType() string { + return "confluent_role_binding" +} + +// GetConnectionDetailsMapping for this Binding +func (tr *Binding) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Binding +func (tr *Binding) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Binding +func (tr *Binding) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Binding +func (tr *Binding) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Binding +func (tr *Binding) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Binding +func (tr *Binding) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// LateInitialize this Binding using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Binding) LateInitialize(attrs []byte) (bool, error) { + params := &BindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Binding) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/role/v1alpha1/zz_groupversion_info.go b/apis/role/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..01af4f5 --- /dev/null +++ b/apis/role/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,32 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=role.crossplane.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "role.crossplane.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/service/v1alpha1/zz_account_types.go b/apis/service/v1alpha1/zz_account_types.go new file mode 100755 index 0000000..3bba5d0 --- /dev/null +++ b/apis/service/v1alpha1/zz_account_types.go @@ -0,0 +1,92 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccountObservation struct { + + // API Version defines the schema version of this representation of a Service Account. + APIVersion *string `json:"apiVersion,omitempty" tf:"api_version,omitempty"` + + // A free-form description of the Service Account. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A human-readable name for the Service Account. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Kind defines the object Service Account represents. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` +} + +type AccountParameters struct { + + // A free-form description of the Service Account. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A human-readable name for the Service Account. + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` +} + +// AccountSpec defines the desired state of Account +type AccountSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AccountParameters `json:"forProvider"` +} + +// AccountStatus defines the observed state of Account. +type AccountStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AccountObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// Account is the Schema for the Accounts API. 
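+//
+// An illustrative manifest (editor's sketch, not generator output; the name
+// and field values below are hypothetical) shows how an Account is typically
+// declared, using the forProvider fields defined above:
+//
+//  apiVersion: service.crossplane.io/v1alpha1
+//  kind: Account
+//  metadata:
+//    name: example-service-account
+//  spec:
+//    forProvider:
+//      displayName: example-sa
+//      description: Service account managed by Crossplane
+//    providerConfigRef:
+//      name: default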
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,confluent} +type Account struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.displayName)",message="displayName is a required parameter" + Spec AccountSpec `json:"spec"` + Status AccountStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AccountList contains a list of Accounts +type AccountList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Account `json:"items"` +} + +// Repository type metadata. +var ( + Account_Kind = "Account" + Account_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Account_Kind}.String() + Account_KindAPIVersion = Account_Kind + "." + CRDGroupVersion.String() + Account_GroupVersionKind = CRDGroupVersion.WithKind(Account_Kind) +) + +func init() { + SchemeBuilder.Register(&Account{}, &AccountList{}) +} diff --git a/apis/service/v1alpha1/zz_generated.deepcopy.go b/apis/service/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..22c10e6 --- /dev/null +++ b/apis/service/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,172 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Account) DeepCopyInto(out *Account) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Account. +func (in *Account) DeepCopy() *Account { + if in == nil { + return nil + } + out := new(Account) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Account) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountList) DeepCopyInto(out *AccountList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Account, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountList. 
+func (in *AccountList) DeepCopy() *AccountList { + if in == nil { + return nil + } + out := new(AccountList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AccountList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountObservation) DeepCopyInto(out *AccountObservation) { + *out = *in + if in.APIVersion != nil { + in, out := &in.APIVersion, &out.APIVersion + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountObservation. +func (in *AccountObservation) DeepCopy() *AccountObservation { + if in == nil { + return nil + } + out := new(AccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountParameters) DeepCopyInto(out *AccountParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountParameters. +func (in *AccountParameters) DeepCopy() *AccountParameters { + if in == nil { + return nil + } + out := new(AccountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountSpec) DeepCopyInto(out *AccountSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountSpec. +func (in *AccountSpec) DeepCopy() *AccountSpec { + if in == nil { + return nil + } + out := new(AccountSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountStatus) DeepCopyInto(out *AccountStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountStatus. +func (in *AccountStatus) DeepCopy() *AccountStatus { + if in == nil { + return nil + } + out := new(AccountStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/service/v1alpha1/zz_generated.managed.go b/apis/service/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..e4aba79 --- /dev/null +++ b/apis/service/v1alpha1/zz_generated.managed.go @@ -0,0 +1,84 @@ +/* +Copyright 2022 Upbound Inc. +*/ +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Account. +func (mg *Account) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Account. +func (mg *Account) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicy of this Account. +func (mg *Account) GetManagementPolicy() xpv1.ManagementPolicy { + return mg.Spec.ManagementPolicy +} + +// GetProviderConfigReference of this Account. +func (mg *Account) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +/* +GetProviderReference of this Account. +Deprecated: Use GetProviderConfigReference. +*/ +func (mg *Account) GetProviderReference() *xpv1.Reference { + return mg.Spec.ProviderReference +} + +// GetPublishConnectionDetailsTo of this Account. +func (mg *Account) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Account. +func (mg *Account) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Account. +func (mg *Account) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Account. +func (mg *Account) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicy of this Account. +func (mg *Account) SetManagementPolicy(r xpv1.ManagementPolicy) { + mg.Spec.ManagementPolicy = r +} + +// SetProviderConfigReference of this Account. +func (mg *Account) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +/* +SetProviderReference of this Account. +Deprecated: Use SetProviderConfigReference. +*/ +func (mg *Account) SetProviderReference(r *xpv1.Reference) { + mg.Spec.ProviderReference = r +} + +// SetPublishConnectionDetailsTo of this Account. +func (mg *Account) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Account. +func (mg *Account) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/service/v1alpha1/zz_generated.managedlist.go b/apis/service/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..f451b33 --- /dev/null +++ b/apis/service/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +/* +Copyright 2022 Upbound Inc. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this AccountList. +func (l *AccountList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/service/v1alpha1/zz_generated_terraformed.go b/apis/service/v1alpha1/zz_generated_terraformed.go new file mode 100755 index 0000000..fe68973 --- /dev/null +++ b/apis/service/v1alpha1/zz_generated_terraformed.go @@ -0,0 +1,88 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "github.com/pkg/errors" + + "github.com/upbound/upjet/pkg/resource" + "github.com/upbound/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Account +func (mg *Account) GetTerraformResourceType() string { + return "confluent_service_account" +} + +// GetConnectionDetailsMapping for this Account +func (tr *Account) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Account +func (tr *Account) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Account +func (tr *Account) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Account +func (tr *Account) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Account +func (tr *Account) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Account +func (tr *Account) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// LateInitialize this Account using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Account) LateInitialize(attrs []byte) (bool, error) { + params := &AccountParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Account) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/service/v1alpha1/zz_groupversion_info.go b/apis/service/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..d9effa2 --- /dev/null +++ b/apis/service/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,32 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=service.crossplane.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "service.crossplane.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+	AddToScheme = SchemeBuilder.AddToScheme
+)
diff --git a/apis/v1alpha1/doc.go b/apis/v1alpha1/doc.go
new file mode 100644
index 0000000..d060da4
--- /dev/null
+++ b/apis/v1alpha1/doc.go
@@ -0,0 +1,9 @@
+/*
+Copyright 2021 Upbound Inc.
+*/
+
+// Package v1alpha1 contains the core resources of the confluent jet provider.
+// +kubebuilder:object:generate=true
+// +groupName=confluent.crossplane.io
+// +versionName=v1alpha1
+package v1alpha1
diff --git a/apis/v1alpha1/register.go b/apis/v1alpha1/register.go
new file mode 100644
index 0000000..cbe2fe5
--- /dev/null
+++ b/apis/v1alpha1/register.go
@@ -0,0 +1,38 @@
+/*
+Copyright 2021 Upbound Inc.
+*/
+
+package v1alpha1
+
+import (
+	"reflect"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+// Package type metadata.
+const (
+	Group   = "confluent.crossplane.io"
+	Version = "v1alpha1"
+)
+
+var (
+	// SchemeGroupVersion is group version used to register these objects
+	SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version}
+
+	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
+	SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
+)
+
+// StoreConfig type metadata.
+var (
+	StoreConfigKind             = reflect.TypeOf(StoreConfig{}).Name()
+	StoreConfigGroupKind        = schema.GroupKind{Group: Group, Kind: StoreConfigKind}.String()
+	StoreConfigKindAPIVersion   = StoreConfigKind + "." + SchemeGroupVersion.String()
+	StoreConfigGroupVersionKind = SchemeGroupVersion.WithKind(StoreConfigKind)
+)
+
+func init() {
+	SchemeBuilder.Register(&StoreConfig{}, &StoreConfigList{})
+}
diff --git a/apis/v1alpha1/types.go b/apis/v1alpha1/types.go
new file mode 100644
index 0000000..a8cab41
--- /dev/null
+++ b/apis/v1alpha1/types.go
@@ -0,0 +1,63 @@
+/*
+Copyright 2021 Upbound Inc.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+// A StoreConfigSpec defines the desired state of a StoreConfig.
+type StoreConfigSpec struct {
+	xpv1.SecretStoreConfig `json:",inline"`
+}
+
+// A StoreConfigStatus represents the status of a StoreConfig.
+type StoreConfigStatus struct {
+	xpv1.ConditionedStatus `json:",inline"`
+}
+
+// +kubebuilder:object:root=true
+
+// A StoreConfig configures how the Confluent controller should store connection details.
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:printcolumn:name="TYPE",type="string",JSONPath=".spec.type"
+// +kubebuilder:printcolumn:name="DEFAULT-SCOPE",type="string",JSONPath=".spec.defaultScope"
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,store,confluent}
+// +kubebuilder:subresource:status
+type StoreConfig struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   StoreConfigSpec   `json:"spec"`
+	Status StoreConfigStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// StoreConfigList contains a list of StoreConfig
+type StoreConfigList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []StoreConfig `json:"items"`
+}
+
+// Note(turkenh): To be generated with AngryJet
+
+// GetStoreConfig returns SecretStoreConfig
+func (in *StoreConfig) GetStoreConfig() xpv1.SecretStoreConfig {
+	return in.Spec.SecretStoreConfig
+}
+
+// GetCondition of this StoreConfig.
+func (in *StoreConfig) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return in.Status.GetCondition(ct) +} + +// SetConditions of this StoreConfig. +func (in *StoreConfig) SetConditions(c ...xpv1.Condition) { + in.Status.SetConditions(c...) +} diff --git a/apis/v1alpha1/zz_generated.deepcopy.go b/apis/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..9d284ba --- /dev/null +++ b/apis/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,105 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StoreConfig) DeepCopyInto(out *StoreConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StoreConfig. +func (in *StoreConfig) DeepCopy() *StoreConfig { + if in == nil { + return nil + } + out := new(StoreConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StoreConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StoreConfigList) DeepCopyInto(out *StoreConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StoreConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StoreConfigList. +func (in *StoreConfigList) DeepCopy() *StoreConfigList { + if in == nil { + return nil + } + out := new(StoreConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StoreConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StoreConfigSpec) DeepCopyInto(out *StoreConfigSpec) { + *out = *in + in.SecretStoreConfig.DeepCopyInto(&out.SecretStoreConfig) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StoreConfigSpec. +func (in *StoreConfigSpec) DeepCopy() *StoreConfigSpec { + if in == nil { + return nil + } + out := new(StoreConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StoreConfigStatus) DeepCopyInto(out *StoreConfigStatus) { + *out = *in + in.ConditionedStatus.DeepCopyInto(&out.ConditionedStatus) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StoreConfigStatus. 
+func (in *StoreConfigStatus) DeepCopy() *StoreConfigStatus { + if in == nil { + return nil + } + out := new(StoreConfigStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/v1beta1/doc.go b/apis/v1beta1/doc.go new file mode 100644 index 0000000..3491337 --- /dev/null +++ b/apis/v1beta1/doc.go @@ -0,0 +1,9 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Package v1beta1 contains the core resources of the confluent upjet provider. +// +kubebuilder:object:generate=true +// +groupName=confluent.crossplane.io +// +versionName=v1beta1 +package v1beta1 diff --git a/apis/v1beta1/register.go b/apis/v1beta1/register.go new file mode 100644 index 0000000..ba5238e --- /dev/null +++ b/apis/v1beta1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +package v1beta1 + +import ( + "reflect" + + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + Group = "confluent.crossplane.io" + Version = "v1beta1" +) + +var ( + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} +) + +// ProviderConfig type metadata. +var ( + ProviderConfigKind = reflect.TypeOf(ProviderConfig{}).Name() + ProviderConfigGroupKind = schema.GroupKind{Group: Group, Kind: ProviderConfigKind}.String() + ProviderConfigKindAPIVersion = ProviderConfigKind + "." + SchemeGroupVersion.String() + ProviderConfigGroupVersionKind = SchemeGroupVersion.WithKind(ProviderConfigKind) +) + +// ProviderConfigUsage type metadata. +var ( + ProviderConfigUsageKind = reflect.TypeOf(ProviderConfigUsage{}).Name() + ProviderConfigUsageGroupKind = schema.GroupKind{Group: Group, Kind: ProviderConfigUsageKind}.String() + ProviderConfigUsageKindAPIVersion = ProviderConfigUsageKind + "." + SchemeGroupVersion.String() + ProviderConfigUsageGroupVersionKind = SchemeGroupVersion.WithKind(ProviderConfigUsageKind) + + ProviderConfigUsageListKind = reflect.TypeOf(ProviderConfigUsageList{}).Name() + ProviderConfigUsageListGroupKind = schema.GroupKind{Group: Group, Kind: ProviderConfigUsageListKind}.String() + ProviderConfigUsageListKindAPIVersion = ProviderConfigUsageListKind + "." + SchemeGroupVersion.String() + ProviderConfigUsageListGroupVersionKind = SchemeGroupVersion.WithKind(ProviderConfigUsageListKind) +) + +func init() { + SchemeBuilder.Register(&ProviderConfig{}, &ProviderConfigList{}) + SchemeBuilder.Register(&ProviderConfigUsage{}, &ProviderConfigUsageList{}) +} diff --git a/apis/v1beta1/types.go b/apis/v1beta1/types.go new file mode 100644 index 0000000..6bfbcb1 --- /dev/null +++ b/apis/v1beta1/types.go @@ -0,0 +1,80 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +// A ProviderConfigSpec defines the desired state of a ProviderConfig. +type ProviderConfigSpec struct { + // Credentials required to authenticate to this provider. + Credentials ProviderCredentials `json:"credentials"` +} + +// ProviderCredentials required to authenticate. +type ProviderCredentials struct { + // Source of the provider credentials. 
+	// +kubebuilder:validation:Enum=None;Secret;InjectedIdentity;Environment;Filesystem
+	Source xpv1.CredentialsSource `json:"source"`
+
+	xpv1.CommonCredentialSelectors `json:",inline"`
+}
+
+// A ProviderConfigStatus reflects the observed state of a ProviderConfig.
+type ProviderConfigStatus struct {
+	xpv1.ProviderConfigStatus `json:",inline"`
+}
+
+// +kubebuilder:object:root=true
+
+// A ProviderConfig configures a Confluent provider.
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:printcolumn:name="SECRET-NAME",type="string",JSONPath=".spec.credentials.secretRef.name",priority=1
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,provider,confluent}
+type ProviderConfig struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   ProviderConfigSpec   `json:"spec"`
+	Status ProviderConfigStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// ProviderConfigList contains a list of ProviderConfig.
+type ProviderConfigList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []ProviderConfig `json:"items"`
+}
+
+// +kubebuilder:object:root=true
+
+// A ProviderConfigUsage indicates that a resource is using a ProviderConfig.
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:printcolumn:name="CONFIG-NAME",type="string",JSONPath=".providerConfigRef.name"
+// +kubebuilder:printcolumn:name="RESOURCE-KIND",type="string",JSONPath=".resourceRef.kind"
+// +kubebuilder:printcolumn:name="RESOURCE-NAME",type="string",JSONPath=".resourceRef.name"
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,provider,confluent}
+type ProviderConfigUsage struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	xpv1.ProviderConfigUsage `json:",inline"`
+}
+
+// +kubebuilder:object:root=true
+
+// ProviderConfigUsageList contains a list of ProviderConfigUsage
+type ProviderConfigUsageList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []ProviderConfigUsage `json:"items"`
+}
diff --git a/apis/v1beta1/zz_generated.deepcopy.go b/apis/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..ae71405
--- /dev/null
+++ b/apis/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,179 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright 2022 Upbound Inc.
+*/
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProviderConfig) DeepCopyInto(out *ProviderConfig) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfig.
+func (in *ProviderConfig) DeepCopy() *ProviderConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(ProviderConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ProviderConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderConfigList) DeepCopyInto(out *ProviderConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProviderConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfigList. +func (in *ProviderConfigList) DeepCopy() *ProviderConfigList { + if in == nil { + return nil + } + out := new(ProviderConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProviderConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderConfigSpec) DeepCopyInto(out *ProviderConfigSpec) { + *out = *in + in.Credentials.DeepCopyInto(&out.Credentials) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfigSpec. +func (in *ProviderConfigSpec) DeepCopy() *ProviderConfigSpec { + if in == nil { + return nil + } + out := new(ProviderConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderConfigStatus) DeepCopyInto(out *ProviderConfigStatus) { + *out = *in + in.ProviderConfigStatus.DeepCopyInto(&out.ProviderConfigStatus) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfigStatus. +func (in *ProviderConfigStatus) DeepCopy() *ProviderConfigStatus { + if in == nil { + return nil + } + out := new(ProviderConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderConfigUsage) DeepCopyInto(out *ProviderConfigUsage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.ProviderConfigUsage.DeepCopyInto(&out.ProviderConfigUsage) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfigUsage. +func (in *ProviderConfigUsage) DeepCopy() *ProviderConfigUsage { + if in == nil { + return nil + } + out := new(ProviderConfigUsage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProviderConfigUsage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProviderConfigUsageList) DeepCopyInto(out *ProviderConfigUsageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProviderConfigUsage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfigUsageList. +func (in *ProviderConfigUsageList) DeepCopy() *ProviderConfigUsageList { + if in == nil { + return nil + } + out := new(ProviderConfigUsageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProviderConfigUsageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderCredentials) DeepCopyInto(out *ProviderCredentials) { + *out = *in + in.CommonCredentialSelectors.DeepCopyInto(&out.CommonCredentialSelectors) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderCredentials. +func (in *ProviderCredentials) DeepCopy() *ProviderCredentials { + if in == nil { + return nil + } + out := new(ProviderCredentials) + in.DeepCopyInto(out) + return out +} diff --git a/apis/v1beta1/zz_generated.pc.go b/apis/v1beta1/zz_generated.pc.go new file mode 100644 index 0000000..c597006 --- /dev/null +++ b/apis/v1beta1/zz_generated.pc.go @@ -0,0 +1,28 @@ +/* +Copyright 2022 Upbound Inc. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1beta1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this ProviderConfig. +func (p *ProviderConfig) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return p.Status.GetCondition(ct) +} + +// GetUsers of this ProviderConfig. +func (p *ProviderConfig) GetUsers() int64 { + return p.Status.Users +} + +// SetConditions of this ProviderConfig. +func (p *ProviderConfig) SetConditions(c ...xpv1.Condition) { + p.Status.SetConditions(c...) +} + +// SetUsers of this ProviderConfig. +func (p *ProviderConfig) SetUsers(i int64) { + p.Status.Users = i +} diff --git a/apis/v1beta1/zz_generated.pcu.go b/apis/v1beta1/zz_generated.pcu.go new file mode 100644 index 0000000..a4f4986 --- /dev/null +++ b/apis/v1beta1/zz_generated.pcu.go @@ -0,0 +1,28 @@ +/* +Copyright 2022 Upbound Inc. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1beta1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetProviderConfigReference of this ProviderConfigUsage. +func (p *ProviderConfigUsage) GetProviderConfigReference() xpv1.Reference { + return p.ProviderConfigReference +} + +// GetResourceReference of this ProviderConfigUsage. +func (p *ProviderConfigUsage) GetResourceReference() xpv1.TypedReference { + return p.ResourceReference +} + +// SetProviderConfigReference of this ProviderConfigUsage. +func (p *ProviderConfigUsage) SetProviderConfigReference(r xpv1.Reference) { + p.ProviderConfigReference = r +} + +// SetResourceReference of this ProviderConfigUsage. 
+func (p *ProviderConfigUsage) SetResourceReference(r xpv1.TypedReference) {
+	p.ResourceReference = r
+}
diff --git a/apis/v1beta1/zz_generated.pculist.go b/apis/v1beta1/zz_generated.pculist.go
new file mode 100644
index 0000000..b3ae1b4
--- /dev/null
+++ b/apis/v1beta1/zz_generated.pculist.go
@@ -0,0 +1,17 @@
+/*
+Copyright 2022 Upbound Inc.
+*/
+// Code generated by angryjet. DO NOT EDIT.
+
+package v1beta1
+
+import resource "github.com/crossplane/crossplane-runtime/pkg/resource"
+
+// GetItems of this ProviderConfigUsageList.
+func (p *ProviderConfigUsageList) GetItems() []resource.ProviderConfigUsage {
+	items := make([]resource.ProviderConfigUsage, len(p.Items))
+	for i := range p.Items {
+		items[i] = &p.Items[i]
+	}
+	return items
+}
diff --git a/apis/zz_register.go b/apis/zz_register.go
new file mode 100755
index 0000000..d37d657
--- /dev/null
+++ b/apis/zz_register.go
@@ -0,0 +1,41 @@
+/*
+Copyright 2022 Upbound Inc.
+*/
+
+// Code generated by upjet. DO NOT EDIT.
+
+// Package apis contains Kubernetes API for the provider.
+package apis
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+
+	v1alpha1 "github.com/crossplane-contrib/provider-confluent/apis/api/v1alpha1"
+	v1alpha1confluent "github.com/crossplane-contrib/provider-confluent/apis/confluent/v1alpha1"
+	v1alpha1kafka "github.com/crossplane-contrib/provider-confluent/apis/kafka/v1alpha1"
+	v1alpha1role "github.com/crossplane-contrib/provider-confluent/apis/role/v1alpha1"
+	v1alpha1service "github.com/crossplane-contrib/provider-confluent/apis/service/v1alpha1"
+	v1alpha1apis "github.com/crossplane-contrib/provider-confluent/apis/v1alpha1"
+	v1beta1 "github.com/crossplane-contrib/provider-confluent/apis/v1beta1"
+)
+
+func init() {
+	// Register the types with the Scheme so the components can map objects to GroupVersionKinds and back
+	AddToSchemes = append(AddToSchemes,
+		v1alpha1.SchemeBuilder.AddToScheme,
+		v1alpha1confluent.SchemeBuilder.AddToScheme,
+		v1alpha1kafka.SchemeBuilder.AddToScheme,
+		v1alpha1role.SchemeBuilder.AddToScheme,
+		v1alpha1service.SchemeBuilder.AddToScheme,
+		v1alpha1apis.SchemeBuilder.AddToScheme,
+		v1beta1.SchemeBuilder.AddToScheme,
+	)
+}
+
+// AddToSchemes may be used to add all resources defined in the project to a Scheme
+var AddToSchemes runtime.SchemeBuilder
+
+// AddToScheme adds all Resources to the Scheme
+func AddToScheme(s *runtime.Scheme) error {
+	return AddToSchemes.AddToScheme(s)
+}
diff --git a/build/.gitignore b/build/.gitignore
new file mode 100644
index 0000000..f53d9c2
--- /dev/null
+++ b/build/.gitignore
@@ -0,0 +1,3 @@
+# ignore vscode files (debug config etc...)
+/.vscode
+/.idea
\ No newline at end of file
diff --git a/build/CODEOWNERS b/build/CODEOWNERS
new file mode 100644
index 0000000..a4826c3
--- /dev/null
+++ b/build/CODEOWNERS
@@ -0,0 +1,11 @@
+# This file controls automatic PR reviewer assignment. See the following docs:
+#
+# * https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners
+# * https://docs.github.com/en/organizations/organizing-members-into-teams/managing-code-review-settings-for-your-team
+#
+# The goal of this file is for most PRs to automatically and fairly have one
+# maintainer assigned as the PR reviewer. All maintainers have permission to approve
+# and merge PRs. All PRs must be approved by at least one maintainer before being merged.
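+#
+# For illustration only (the path and team below are hypothetical, not part of
+# this file's policy): ownership can also be scoped to a path, e.g.
+#
+#   /docs/ @example-org/docs-reviewers
+#
+# so that PRs touching that directory request review from a specific team.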
+
+# Fallback owners
+* @turkenh @pedjak @phisco @ezgidemirel @lsviben
diff --git a/build/CODE_OF_CONDUCT.md b/build/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..a3ee6a8
--- /dev/null
+++ b/build/CODE_OF_CONDUCT.md
@@ -0,0 +1,3 @@
+## Community Code of Conduct
+
+This project follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
diff --git a/build/CONTRIBUTING.md b/build/CONTRIBUTING.md
new file mode 100644
index 0000000..4658355
--- /dev/null
+++ b/build/CONTRIBUTING.md
@@ -0,0 +1,56 @@
+## How to Contribute
+
+The Build project is under the [Apache 2.0 license](LICENSE). We accept
+contributions via GitHub pull requests. This document outlines some of the
+conventions related to development workflow, commit message formatting,
+contact points, and other resources to make it easier to get your
+contribution accepted.
+
+## Certificate of Origin
+
+By contributing to this project you agree to the Developer Certificate of
+Origin (DCO). This document was created by the Linux Kernel community and is a
+simple statement that you, as a contributor, have the legal right to make the
+contribution. See the [DCO](DCO) file for details.
+
+Contributors sign off that they adhere to these requirements by adding a
+Signed-off-by line to commit messages. For example:
+
+```
+This is my commit message
+
+Signed-off-by: Random J Developer <random@developer.example.org>
+```
+
+Git even has a -s command line option to append this automatically to your
+commit message:
+
+```
+$ git commit -s -m 'This is my commit message'
+```
+
+If you have already made a commit and forgot to include the sign-off, you can
+amend your last commit to add the sign-off with the following command, which
+can then be force pushed.
+
+```
+git commit --amend -s
+```
+
+We use a [DCO bot](https://github.com/apps/dco) to enforce the DCO on each
+pull request and on branch commits.
+
+## Getting Started
+
+- Fork the repository on GitHub
+- Read the [install guide](INSTALL.md) for build and test instructions
+- Play with the project, open issues, submit patches!
+
+## Contribution Flow
+
+This is a rough outline of what a contributor's workflow looks like:
+
+- Create a branch from where you want to base your work (usually master).
+- Make your changes and arrange them in readable commits.
+- Make sure your commit messages are clear about what changed and why.
+- Make sure all tests pass, and add any new tests as appropriate.
+- Push your changes to the branch in your fork of the repository.
+- Submit a pull request to the original repository.
diff --git a/build/DCO b/build/DCO
new file mode 100644
index 0000000..716561d
--- /dev/null
+++ b/build/DCO
@@ -0,0 +1,36 @@
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+ + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. diff --git a/build/LICENSE b/build/LICENSE new file mode 100644 index 0000000..fb5d49b --- /dev/null +++ b/build/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2016 The Upbound Authors. All rights reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/build/README.md b/build/README.md
new file mode 100644
index 0000000..0302e22
--- /dev/null
+++ b/build/README.md
@@ -0,0 +1,92 @@
+# Build
+
+This project contains common build scripts that are used by a number of open
+source projects at Upbound. We standardize on a makefile interface to all
+projects with simple verbs. We attempt to put more logic in makefiles, closer
+to the project, and less on the side of CI. This enables the builds to run in
+multiple environments, including laptops and different CI/CD systems. Most
+build and test operations are designed to run in a container that contains
+all the necessary tools.
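+
+Because every project shares this makefile interface, the common verbs look
+the same in every repository. For example (a representative subset; run `make
+help` in a consuming project for the authoritative list):
+
+```
+make build   # build artifacts for the host platform
+make test    # run unit tests
+make lint    # run lint and code analysis
+```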
+
+Some features include:
+ * support for multiple platforms and architectures
+ * support for Linux and macOS hosts
+ * support for build channels for releases
+ * support for releasing artifacts
+ * support for golang, helm, kubernetes, nodejs, yarn, s3, and docker
+
+# Quickstart
+
+To use this build project just add a submodule to your project:
+
+```
+git submodule add https://github.com/upbound/build build
+```
+
+and add a `Makefile` in the root. For example, the following will build a go
+project that publishes containers and helm charts.
+
+```
+# Project Setup
+PROJECT_NAME := myrepo
+PROJECT_REPO := github.com/upbound/$(PROJECT_NAME)
+
+PLATFORMS ?= linux_amd64 linux_arm64
+include build/makelib/common.mk
+
+S3_BUCKET ?= upbound.releases/myrepo
+include build/makelib/output.mk
+
+# Setup Go
+GO_STATIC_PACKAGES = $(GO_PROJECT)/cmd/api $(GO_PROJECT)/cmd/installer
+GO_LDFLAGS += -X $(GO_PROJECT)/pkg/version.Version=$(VERSION)
+include build/makelib/golang.mk
+
+# Setup Helm
+HELM_BASE_URL = https://charts.upbound.io
+HELM_S3_BUCKET = upbound.charts
+HELM_CHARTS = myrepo-api
+HELM_CHART_LINT_ARGS_myrepo-api = --set nameOverride='',imagePullSecrets=''
+include build/makelib/k8s_tools.mk
+include build/makelib/helm.mk
+
+# Docker images
+DOCKER_myrepo = upbound
+IMAGES = myrepo
+include build/makelib/image.mk
+```
+
+Now build the project by running:
+
+```
+make -j build
+```
+
+and run tests:
+
+```
+make -j test
+```
+
+To see the help run `make help`.
+
+## Local Development Setup
+
+To use the local development targets, first include `local.mk` in your
+`Makefile`:
+
+```
+include build/makelib/local.mk
+```
+
+Then, run the following command to initialize a local development
+configuration:
+
+```
+make local.scaffold
+```
+
+You can now configure and add more components (e.g. helm releases) to your
+local development setup.
+
+## Contributing
+
+We welcome contributions. See [Contributing](CONTRIBUTING.md) to get started.
+
+## Report a Bug
+
+For filing bugs, suggesting improvements, or requesting new features, please
+open an [issue](https://github.com/upbound/build/issues).
+
+## Licensing
+
+The build project is under the Apache 2.0 license.
diff --git a/build/common.sh b/build/common.sh
new file mode 100644
index 0000000..ba1bf66
--- /dev/null
+++ b/build/common.sh
@@ -0,0 +1,91 @@
+#!/bin/bash -e
+
+# Copyright 2016 The Upbound Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# get the build environment variables from the special build.vars target in the main makefile
+eval $(make --no-print-directory -C ${scriptdir}/.. build.vars)
+
+KUBEADM_DIND_DIR=${CACHE_DIR}/kubeadm-dind
+
+CROSS_IMAGE=${BUILD_REGISTRY}/cross-amd64
+CROSS_IMAGE_VOLUME=cross-volume
+CROSS_RSYNC_PORT=10873
+
+function start_rsync_container() {
+    docker run \
+        -d \
+        -e OWNER=root \
+        -e GROUP=root \
+        -e MKDIRS="/volume/go/src/${PROJECT_REPO}" \
+        -p ${CROSS_RSYNC_PORT}:873 \
+        -v ${CROSS_IMAGE_VOLUME}:/volume \
+        --entrypoint "/tini" \
+        ${CROSS_IMAGE} \
+        -- /build/rsyncd.sh
+}
+
+function wait_for_rsync() {
+    # wait for rsync to come up
+    local tries=100
+    while (( ${tries} > 0 )) ; do
+        if rsync "rsync://localhost:${CROSS_RSYNC_PORT}/" &> /dev/null ; then
+            return 0
+        fi
+        tries=$(( ${tries} - 1 ))
+        sleep 0.1
+    done
+    echo ERROR: rsyncd did not come up >&2
+    exit 1
+}
+
+function stop_rsync_container() {
+    local id=$1
+
+    docker stop ${id} &> /dev/null || true
+    docker rm ${id} &> /dev/null || true
+}
+
+function run_rsync() {
+    local src=$1
+    shift
+
+    local dst=$1
+    shift
+
+    # run the container as an rsyncd daemon so that we can copy the
+    # source tree to the container volume.
+    local id=$(start_rsync_container)
+
+    # wait for rsync to come up
+    wait_for_rsync || stop_rsync_container ${id}
+
+    # NOTE: add --progress to show files being synced
+    rsync \
+        --archive \
+        --delete \
+        --prune-empty-dirs \
+        "$@" \
+        $src $dst || { stop_rsync_container ${id}; return 1; }
+
+    stop_rsync_container ${id}
+}
+
+function rsync_host_to_container() {
+    run_rsync ${scriptdir}/.. rsync://localhost:${CROSS_RSYNC_PORT}/volume/go/src/${PROJECT_REPO} "$@"
+}
+
+function rsync_container_to_host() {
+    run_rsync rsync://localhost:${CROSS_RSYNC_PORT}/volume/go/src/${PROJECT_REPO}/ ${scriptdir}/.. "$@"
+}
diff --git a/build/cross/Dockerfile b/build/cross/Dockerfile
new file mode 100644
index 0000000..aca4127
--- /dev/null
+++ b/build/cross/Dockerfile
@@ -0,0 +1,102 @@
+# Copyright 2016 The Upbound Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
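+
+# NOTE: this image is normally built via the Makefile in this directory, which
+# passes the ARCH and TINI_VERSION build args. A rough sketch of an equivalent
+# manual build (the tag and TINI_VERSION value here are illustrative only):
+#
+#   docker build --build-arg ARCH=amd64 --build-arg TINI_VERSION=v0.19.0 \
+#     -t build-registry/cross-amd64 .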
+ +FROM ubuntu:16.04 + +# ------------------------------------------------------------------------------------------------ +# install build and release tools +RUN DEBIAN_FRONTEND=noninteractive apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install -yy -q --no-install-recommends \ + build-essential \ + ca-certificates \ + curl \ + docker.io \ + git \ + jq \ + lsb-release \ + make \ + rsync \ + runit \ + sudo \ + unzip \ + zip && \ + DEBIAN_FRONTEND=noninteractive apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get upgrade -y && \ + DEBIAN_FRONTEND=noninteractive apt-get autoremove -y && \ + DEBIAN_FRONTEND=noninteractive apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + +# ------------------------------------------------------------------------------------------------ +# Install AWSCLI +RUN curl -fsSL "https://awscli.amazonaws.com/awscli-exe-linux-x86_64-2.1.21.zip" -o "awscliv2.zip" && \ + unzip awscliv2.zip && \ + ./aws/install && \ + rm -rf awscliv2.zip ./aws + +# ------------------------------------------------------------------------------------------------ +# Go support +RUN GO_VERSION=1.18.4 && \ + GO_HASH=c9b099b68d93f5c5c8a8844a89f8db07eaa58270e3a1e01804f17f4cf8df02f5 && \ + curl -fsSL https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz -o golang.tar.gz && \ + echo "${GO_HASH} golang.tar.gz" | sha256sum -c - && \ + tar -C /usr/local -xzf golang.tar.gz && \ + rm golang.tar.gz +ENV PATH /usr/local/go/bin:$PATH + +# precompile the go standard library for all supported platforms and configurations +# the install suffixes match those in golang.mk so please keep them in sync +RUN platforms="darwin_amd64 darwin_arm64 windows_amd64 linux_amd64 linux_arm64" && \ + for p in $platforms; do CGO_ENABLED=0 GOOS=${p%_*} GOARCH=${p##*_} GOARM=7 go install -installsuffix static -a std; done + +# ------------------------------------------------------------------------------------------------ +# Node JS and chrome support +RUN curl -fsSL https://deb.nodesource.com/setup_12.x | bash - && \ + curl -fsSL https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - && \ + echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" > /etc/apt/sources.list.d/google.list && \ + DEBIAN_FRONTEND=noninteractive apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install -yy -q --no-install-recommends \ + nodejs \ + google-chrome-stable \ + xvfb && \ + DEBIAN_FRONTEND=noninteractive apt-get upgrade -y && \ + DEBIAN_FRONTEND=noninteractive apt-get autoremove -y && \ + DEBIAN_FRONTEND=noninteractive apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \ + rm -f /etc/apt/sources.list.d/google.list + +# setup chrome to run inside the container +ADD xvfb-chrome /usr/bin/xvfb-chrome +RUN ln -fs /usr/bin/xvfb-chrome /usr/bin/google-chrome +ENV CHROME_BIN /usr/bin/google-chrome + +# ------------------------------------------------------------------------------------------------ +# Yarn +RUN curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add - && \ + echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list && \ + DEBIAN_FRONTEND=noninteractive apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install -yy -q --no-install-recommends yarn && \ + DEBIAN_FRONTEND=noninteractive apt-get upgrade -y && \ + DEBIAN_FRONTEND=noninteractive apt-get autoremove -y && \ + DEBIAN_FRONTEND=noninteractive apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \ + rm -f 
/etc/apt/sources.list.d/yarn.list + + +# ------------------------------------------------------------------------------------------------ +# Run tini as PID 1 and avoid signal handling issues +ARG ARCH +ARG TINI_VERSION +ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-${ARCH} /tini +RUN chmod +x /tini + +COPY run.sh rsyncd.sh /build/ + +ENTRYPOINT [ "/tini", "-g", "--", "/build/run.sh" ] diff --git a/build/cross/Makefile b/build/cross/Makefile new file mode 100644 index 0000000..edb6876 --- /dev/null +++ b/build/cross/Makefile @@ -0,0 +1,36 @@ +# Copyright 2016 The Upbound Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# ==================================================================================== +# Setup Project + +PLATFORMS := linux_amd64 +include ../makelib/common.mk + +IMAGE = $(BUILD_REGISTRY)/cross-$(ARCH) +CACHE_IMAGES = $(IMAGE) +include ../makelib/image.mk + +# ==================================================================================== +# Targets + +img.build: + @$(INFO) docker build $(IMAGE) + @cp -a . $(IMAGE_TEMP_DIR) + @docker build $(BUILD_ARGS) \ + --build-arg ARCH=$(ARCH) \ + --build-arg TINI_VERSION=$(TINI_VERSION) \ + -t $(IMAGE) \ + $(IMAGE_TEMP_DIR) + @$(OK) docker build $(IMAGE) diff --git a/build/cross/rsyncd.sh b/build/cross/rsyncd.sh new file mode 100755 index 0000000..35ff953 --- /dev/null +++ b/build/cross/rsyncd.sh @@ -0,0 +1,51 @@ +#!/bin/bash -e + +# Copyright 2016 The Upbound Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
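+
+# This script runs rsync as a daemon inside the cross-build container so the
+# host can sync the source tree into the container volume. It is configured
+# entirely via environment variables; common.sh starts it roughly like this
+# (a sketch for illustration, values abbreviated):
+#
+#   docker run -d -e OWNER=root -e GROUP=root -e MKDIRS=/volume/go/src/<repo> \
+#     -p 10873:873 -v cross-volume:/volume --entrypoint /tini <cross-image> \
+#     -- /build/rsyncd.sh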
+
+VOLUME=${VOLUME:-/volume}
+ALLOW=${ALLOW:-192.168.0.0/16 172.16.0.0/12 10.0.0.0/8}
+OWNER=${OWNER:-nobody}
+GROUP=${GROUP:-nogroup}
+
+# create a dedicated group and user for rsync when non-default, non-root
+# values are requested (OWNER and GROUP are assumed to be numeric IDs here)
+if [[ "${GROUP}" != "nogroup" && "${GROUP}" != "root" ]]; then
+    groupadd -g ${GROUP} rsync
+fi
+
+if [[ "${OWNER}" != "nobody" && "${OWNER}" != "root" ]]; then
+    useradd -u ${OWNER} -G rsync rsync
+fi
+
+chown "${OWNER}:${GROUP}" "${VOLUME}"
+
+[ -f /etc/rsyncd.conf ] || cat <<EOF > /etc/rsyncd.conf
+uid = ${OWNER}
+gid = ${GROUP}
+use chroot = yes
+log file = /dev/stdout
+reverse lookup = no
+[volume]
+    hosts deny = *
+    hosts allow = ${ALLOW}
+    read only = false
+    path = ${VOLUME}
+    comment = volume
+EOF
+
+for dir in ${MKDIRS}; do
+    mkdir -p ${dir}
+    chown "${OWNER}:${GROUP}" ${dir}
+done
+
+exec /usr/bin/rsync --no-detach --daemon --config /etc/rsyncd.conf "$@"
diff --git a/build/cross/run.sh b/build/cross/run.sh
new file mode 100755
index 0000000..27b9aba
--- /dev/null
+++ b/build/cross/run.sh
@@ -0,0 +1,34 @@
+#!/bin/bash -e
+
+# Copyright 2016 The Upbound Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ARGS="$@"
+if [ $# -eq 0 ]; then
+    ARGS=/bin/bash
+fi
+
+BUILDER_USER=${BUILDER_USER:-upbound}
+BUILDER_GROUP=${BUILDER_GROUP:-upbound}
+BUILDER_UID=${BUILDER_UID:-1000}
+BUILDER_GID=${BUILDER_GID:-1000}
+
+groupadd -o -g $BUILDER_GID $BUILDER_GROUP 2> /dev/null
+useradd -o -m -g $BUILDER_GID -u $BUILDER_UID $BUILDER_USER 2> /dev/null
+echo "$BUILDER_USER ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
+export HOME=/home/${BUILDER_USER}
+echo "127.0.0.1 $(cat /etc/hostname)" >> /etc/hosts
+[[ -S /var/run/docker.sock ]] && chmod 666 /var/run/docker.sock
+chown -R $BUILDER_UID:$BUILDER_GID $HOME
+exec chpst -u :$BUILDER_UID:$BUILDER_GID ${ARGS}
diff --git a/build/cross/xvfb-chrome b/build/cross/xvfb-chrome
new file mode 100755
index 0000000..ae88a91
--- /dev/null
+++ b/build/cross/xvfb-chrome
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+# Copyright 2016 The Upbound Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+_kill_procs() {
+  kill -TERM $chrome
+  wait $chrome
+}
+
+# Setup a trap to catch SIGTERM and relay it to child processes
+trap _kill_procs SIGTERM
+
+# Start Chrome inside xvfb
+xvfb-run -a -s "-screen 0 1920x1080x24 -nolisten tcp" /opt/google/chrome/chrome --no-sandbox $@ &
+chrome=$!
+
+wait $chrome
diff --git a/build/makelib/common.mk b/build/makelib/common.mk
new file mode 100644
index 0000000..859f7db
--- /dev/null
+++ b/build/makelib/common.mk
@@ -0,0 +1,494 @@
+# Copyright 2016 The Upbound Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# remove default suffixes as we don't use them
+.SUFFIXES:
+
+# set the shell to bash always
+SHELL := /usr/bin/env bash
+
+# default target is build
+.PHONY: all
+all: build
+
+# ====================================================================================
+# Colors
+
+BLACK := $(shell printf "\033[30m")
+BLACK_BOLD := $(shell printf "\033[30;1m")
+RED := $(shell printf "\033[31m")
+RED_BOLD := $(shell printf "\033[31;1m")
+GREEN := $(shell printf "\033[32m")
+GREEN_BOLD := $(shell printf "\033[32;1m")
+YELLOW := $(shell printf "\033[33m")
+YELLOW_BOLD := $(shell printf "\033[33;1m")
+BLUE := $(shell printf "\033[34m")
+BLUE_BOLD := $(shell printf "\033[34;1m")
+MAGENTA := $(shell printf "\033[35m")
+MAGENTA_BOLD := $(shell printf "\033[35;1m")
+CYAN := $(shell printf "\033[36m")
+CYAN_BOLD := $(shell printf "\033[36;1m")
+WHITE := $(shell printf "\033[37m")
+WHITE_BOLD := $(shell printf "\033[37;1m")
+CNone := $(shell printf "\033[0m")
+
+# ====================================================================================
+# Logger
+
+TIME_LONG = `date +%Y-%m-%d' '%H:%M:%S`
+TIME_SHORT = `date +%H:%M:%S`
+TIME = $(TIME_SHORT)
+
+INFO = echo ${TIME} ${BLUE}[ .. ]${CNone}
+WARN = echo ${TIME} ${YELLOW}[WARN]${CNone}
+ERR = echo ${TIME} ${RED}[FAIL]${CNone}
+OK = echo ${TIME} ${GREEN}[ OK ]${CNone}
+FAIL = (echo ${TIME} ${RED}[FAIL]${CNone} && false)
+
+# ====================================================================================
+# Build Options
+
+# Set V=1 to turn on more verbose build
+V ?= 0
+ifeq ($(V),1)
+MAKEFLAGS += VERBOSE=1
+else
+MAKEFLAGS += --no-print-directory
+endif
+
+# Set DEBUG=1 to turn on a debug build
+DEBUG ?= 0
+
+# ====================================================================================
+# Release Options
+
+# TODO(hasheddan): change default to main and remove master as valid option.
+CHANNEL ?= master
+ifeq ($(filter master main alpha beta stable,$(CHANNEL)),)
+$(error invalid channel $(CHANNEL))
+endif
+
+ifeq ($(COMMIT_HASH),)
+override COMMIT_HASH := $(shell git rev-parse HEAD)
+endif
+
+ifeq ($(origin BRANCH_NAME), undefined)
+BRANCH_NAME := $(shell git rev-parse --abbrev-ref HEAD)
+endif
+
+REMOTE_NAME ?= origin
+
+# ====================================================================================
+# Platform and cross build options
+
+# all supported platforms we build for. this can be set to other platforms if
+# desired. we use the golang os and arch names for convenience.
+PLATFORMS ?= darwin_amd64 darwin_arm64 windows_amd64 linux_amd64 linux_arm64
+
+# Set the host's OS. Only linux and darwin are supported for now.
+HOSTOS := $(shell uname -s | tr '[:upper:]' '[:lower:]')
+ifeq ($(filter darwin linux,$(HOSTOS)),)
+$(error build only supported on linux and darwin host currently)
+endif
+
+# Set the host's arch.
+HOSTARCH := $(shell uname -m)
+
+# If SAFEHOSTARCH and TARGETARCH have not been defined yet, default them to
+# the host's arch.
+ifeq ($(origin SAFEHOSTARCH),undefined)
+SAFEHOSTARCH := $(HOSTARCH)
+endif
+ifeq ($(origin TARGETARCH), undefined)
+TARGETARCH := $(HOSTARCH)
+endif
+
+# Automatically translate x86_64 to amd64
+ifeq ($(HOSTARCH),x86_64)
+SAFEHOSTARCH := amd64
+TARGETARCH := amd64
+endif
+
+# Automatically translate aarch64 to arm64
+ifeq ($(HOSTARCH),aarch64)
+SAFEHOSTARCH := arm64
+TARGETARCH := arm64
+endif
+
+ifeq ($(filter amd64 arm64 ppc64le,$(SAFEHOSTARCH)),)
+$(error build only supported on amd64, arm64 and ppc64le host currently)
+endif
+
+# Standardize Host Platform variables
+HOST_PLATFORM := $(HOSTOS)_$(HOSTARCH)
+SAFEHOSTPLATFORM := $(HOSTOS)-$(SAFEHOSTARCH)
+SAFEHOST_PLATFORM := $(HOSTOS)_$(SAFEHOSTARCH)
+TARGET_PLATFORM := $(HOSTOS)_$(TARGETARCH)
+
+# Set the platform to build if not currently defined
+ifeq ($(origin PLATFORM),undefined)
+PLATFORM := $(TARGET_PLATFORM)
+# if the target platform is on the supported list add it to the single build target
+ifneq ($(filter $(PLATFORMS),$(TARGET_PLATFORM)),)
+BUILD_PLATFORMS = $(TARGET_PLATFORM)
+endif
+
+# for convenience always build the linux platform when building on mac
+ifneq ($(HOSTOS),linux)
+BUILD_PLATFORMS += linux_$(TARGETARCH)
+endif
+
+else
+BUILD_PLATFORMS = $(PLATFORM)
+endif
+
+OS := $(word 1, $(subst _, ,$(PLATFORM)))
+ARCH := $(word 2, $(subst _, ,$(PLATFORM)))
+
+ifeq ($(HOSTOS),darwin)
+NPROCS := $(shell sysctl -n hw.ncpu)
+else
+NPROCS := $(shell nproc)
+endif
+
+# ====================================================================================
+# Setup directories and paths
+
+# include the common make file
+COMMON_SELF_DIR := $(dir $(lastword $(MAKEFILE_LIST)))
+
+# the root directory of this repo
+ifeq ($(origin ROOT_DIR),undefined)
+ROOT_DIR := $(abspath $(shell cd $(COMMON_SELF_DIR)/../.. && pwd -P))
+endif
+
+# the output directory which holds final build produced artifacts
+ifeq ($(origin OUTPUT_DIR),undefined)
+OUTPUT_DIR := $(ROOT_DIR)/_output
+endif
+
+# a working directory that holds all temporary or working items generated
+# during the build. The items will be discarded on a clean build and they
+# will never be cached.
+ifeq ($(origin WORK_DIR), undefined)
+WORK_DIR := $(ROOT_DIR)/.work
+endif
+
+# a directory that holds tools and other items that are safe to cache
+# across build invocations. removing this directory will trigger a
+# re-download and waste time. It's safe to cache this directory on CI systems.
+ifeq ($(origin CACHE_DIR), undefined)
+CACHE_DIR := $(ROOT_DIR)/.cache
+endif
+
+TOOLS_DIR := $(CACHE_DIR)/tools
+TOOLS_HOST_DIR := $(TOOLS_DIR)/$(HOST_PLATFORM)
+
+# ====================================================================================
+# Version
+
+ifeq ($(origin HOSTNAME), undefined)
+HOSTNAME := $(shell hostname)
+endif
+
+# ====================================================================================
+# Version and Tagging
+
+# set if you want to use tag grouping, e.g. setting it to "aws" would produce
+# tags like "aws/v0.1.0" and the release branch would be named
+# "release-aws-0.1", but the version would still be "v0.1.0".
+ifneq ($(PROJECT_VERSION_TAG_GROUP),)
+VERSION_TAG_PREFIX := $(PROJECT_VERSION_TAG_GROUP)/
+RELEASE_BRANCH_GROUP := $(PROJECT_VERSION_TAG_GROUP)-
+endif
+
+# set a semantic version number from git if VERSION is undefined.
+ifeq ($(origin VERSION), undefined)
+# check if there are any existing `git tag` values
+ifeq ($(shell git tag),)
+# no tags found - default to initial tag `v0.0.0`
+VERSION := $(shell echo "v0.0.0-$$(git rev-list HEAD --count)-g$$(git describe --dirty --always)" | sed 's/-/./2' | sed 's/-/./2' | sed 's/-/./2')
+else
+# use tags
+VERSION := $(shell git describe --dirty --always --tags --match '$(VERSION_TAG_PREFIX)*' | sed 's|.*/||' | sed 's/-/./2' | sed 's/-/./2' | sed 's/-/./2')
+endif
+endif
+export VERSION
+
+VERSION_REGEX := ^v\([0-9]*\)[.]\([0-9]*\)[.]\([0-9]*\)$$
+VERSION_VALID := $(shell echo "$(VERSION)" | grep -q '$(VERSION_REGEX)' && echo 1 || echo 0)
+
+# Given "v0.17.0-3.gb4eee9f.dirty" it returns "0".
+VERSION_MAJOR := $(shell echo "$(VERSION)" | cut -d'.' -f1 | sed '1s/^.//')
+
+# Given "v0.17.0-3.gb4eee9f.dirty" it returns "17".
+VERSION_MINOR := $(shell echo "$(VERSION)" | cut -d'.' -f2)
+
+# Given "v0.17.0-3.gb4eee9f.dirty" it returns "0-3.gb4eee9f.dirty".
+# Given "v0.17.1" it returns "1".
+VERSION_PATCH := $(shell echo "$(VERSION)" | cut -d'.' -f3-)
+
+release.tag:
+ifneq ($(VERSION_VALID),1)
+	$(error invalid version $(VERSION). must be a semantic version with v[Major].[Minor].[Patch] only)
+endif
+	@$(INFO) tagging commit hash $(COMMIT_HASH) with $(VERSION_TAG_PREFIX)$(VERSION)
+	git tag -f -m "$(VERSION_TAG_PREFIX)$(VERSION)" $(VERSION_TAG_PREFIX)$(VERSION) $(COMMIT_HASH)
+	git push $(REMOTE_NAME) $(VERSION_TAG_PREFIX)$(VERSION)
+	@set -e; if ! git ls-remote --heads $(REMOTE_NAME) | grep -q refs/heads/release-$(RELEASE_BRANCH_GROUP)$(VERSION_MAJOR).$(VERSION_MINOR); then \
+		echo === creating new release branch release-$(RELEASE_BRANCH_GROUP)$(VERSION_MAJOR).$(VERSION_MINOR) ;\
+		git branch -f release-$(RELEASE_BRANCH_GROUP)$(VERSION_MAJOR).$(VERSION_MINOR) $(COMMIT_HASH) ;\
+		git push $(REMOTE_NAME) release-$(RELEASE_BRANCH_GROUP)$(VERSION_MAJOR).$(VERSION_MINOR) ;\
+	fi
+	@$(OK) tagging
+
+# fail publish if the version is dirty
+version.isdirty:
+	@if [[ $(VERSION) = *.dirty ]]; then \
+		$(ERR) version '$(VERSION)' is dirty, aborting publish.
The following files changed: ;\ + git status --short;\ + exit 1; \ + fi + +# ==================================================================================== +# Helpers + +SED_CMD?=sed -i -e + +COMMA := , +EMPTY := +SPACE := $(EMPTY) $(EMPTY) + +# define a newline +define \n + + +endef + +# ==================================================================================== +# This is a special target used to support the build container + +common.buildvars: + @echo PROJECT_NAME=$(PROJECT_NAME) + @echo PROJECT_REPO=$(PROJECT_REPO) + @echo BUILD_HOST=$(HOSTNAME) + @echo BUILD_REGISTRY=$(BUILD_REGISTRY) + @echo DOCKER_REGISTRY=$(DOCKER_REGISTRY) + @echo OUTPUT_DIR=$(OUTPUT_DIR) + @echo WORK_DIR=$(WORK_DIR) + @echo CACHE_DIR=$(CACHE_DIR) + @echo HOSTOS=$(HOSTOS) + @echo HOSTARCH=$(HOSTARCH) + @echo SAFEHOSTARCH=$(SAFEHOSTARCH) + @echo TARGETARCH=$(TARGETARCH) + @echo PLATFORM=$(PLATFORM) + @echo VERSION=$(VERSION) + +build.vars: common.buildvars + +# ==================================================================================== +# Common Targets - Build and Test workflow + +# run init steps before building code +# these will run once regardless of how many platforms we are building +build.init: ; @: + +# check the code with fmt, lint, vet and other source level checks pre build +# these will run once regardless of how many platforms we are building +build.check: ; @: + +# check the code with fmt, lint, vet and other source level checks pre build +# these will run for each platform being built +build.check.platform: ; @: + +# build code. this will run once regardless of platform +build.code: ; @: + +# build code. this will run for each platform built +build.code.platform: ; @: + +# build releasable artifacts. this will run once regardless of platform +build.artifacts: ; @: + +# build releasable artifacts. this will run for each platform being built +build.artifacts.platform: ; @: + +# runs at the end of the build to do any cleanup, caching etc. +# these will run once regardless of how many platforms we are building +build.done: ; @: + +# helper targets for building multiple platforms +do.build.platform.%: + @$(MAKE) build.check.platform PLATFORM=$* + @$(MAKE) build.code.platform PLATFORM=$* +do.build.platform: $(foreach p,$(PLATFORMS), do.build.platform.$(p)) + +# helper targets for building multiple platforms +do.build.artifacts.%: + @$(MAKE) build.artifacts.platform PLATFORM=$* +do.build.artifacts: $(foreach p,$(PLATFORMS), do.build.artifacts.$(p)) + +# build for all platforms +build.all: + @$(MAKE) build.init + @$(MAKE) build.check + @$(MAKE) build.code + @$(MAKE) do.build.platform + @$(MAKE) build.artifacts + @$(MAKE) do.build.artifacts + @$(MAKE) build.done + +# build for a single platform if it's supported +build: +ifneq ($(BUILD_PLATFORMS),) + @$(MAKE) build.all PLATFORMS="$(BUILD_PLATFORMS)" +else + @: +endif + +# clean all files created during the build. 
+clean:
+	@rm -fr $(OUTPUT_DIR) $(WORK_DIR)
+
+# clean all files created during the build, including caches across builds
+distclean: clean
+	@rm -fr $(CACHE_DIR)
+
+# run lint and other code analysis
+lint.init: ; @:
+lint.run: ; @:
+lint.done: ; @:
+lint:
+	@$(MAKE) lint.init
+	@$(MAKE) lint.run
+	@$(MAKE) lint.done
+
+# unit tests
+test.init: ; @:
+test.run: ; @:
+test.done: ; @:
+
+test:
+	@$(MAKE) test.init
+	@$(MAKE) test.run
+	@$(MAKE) test.done
+
+# e2e tests
+e2e.init: ; @:
+e2e.run: ; @:
+e2e.done: ; @:
+
+e2e:
+	@$(MAKE) e2e.init
+	@$(MAKE) e2e.run
+	@$(MAKE) e2e.done
+
+.PHONY: build.init build.check build.check.platform build.code build.code.platform build.artifacts build.artifacts.platform
+.PHONY: build.done do.build.platform.% do.build.platform do.build.artifacts.% do.build.artifacts
+.PHONY: build.all build clean distclean lint test test.init test.run test.done e2e.init e2e.run e2e.done
+
+# ====================================================================================
+# Release Targets
+
+# run init steps before publishing
+publish.init: ; @:
+
+# publish artifacts
+publish.artifacts: ; @:
+
+# publish all releasable artifacts
+publish: version.isdirty
+	@$(MAKE) publish.init
+	@$(MAKE) publish.artifacts
+
+# promote init runs before promote
+promote.init: ; @:
+
+# promote all artifacts to a release channel
+promote.artifacts: ; @:
+
+# promote to a release channel
+promote:
+	@$(MAKE) promote.init
+	@$(MAKE) promote.artifacts
+
+# tag a release
+tag: release.tag
+
+# run code generation
+generate.init: ; @:
+generate.run: ; @:
+generate.done: ; @:
+
+generate:
+	@$(MAKE) generate.init
+	@$(MAKE) generate.run
+	@$(MAKE) generate.done
+
+# prepare for code review
+reviewable:
+	@$(MAKE) generate
+	@$(MAKE) lint
+	@$(MAKE) test
+
+# ensure generate target doesn't create a diff
+check-diff: generate
+	@$(INFO) checking that branch is clean
+	@if git status --porcelain | grep . ; then $(ERR) There are uncommitted changes after running make generate. Please ensure you commit all generated files in this branch after running make generate. && false; else $(OK) branch is clean; fi
+
+.PHONY: publish.init publish.artifacts publish promote.init promote.artifacts promote tag generate reviewable check-diff
+
+# ====================================================================================
+# Help
+
+define HELPTEXT
+Usage: make [make-options] <target> [options]
+
+Common Targets:
+    build        Build source code and other artifacts for host platform.
+    build.all    Build source code and other artifacts for all platforms.
+    clean        Remove all files created during the build.
+    distclean    Remove all files created during the build including cached tools.
+    lint         Run lint and code analysis tools.
+    help         Show this help info.
+    test         Runs unit tests.
+    e2e          Runs end-to-end integration tests.
+    generate     Run code generation.
+    reviewable   Validate that a PR is ready for review.
+    check-diff   Ensure the reviewable target doesn't create a git diff.
+
+Common Options:
+    DEBUG        Whether to generate debug symbols. Default is 0.
+    PLATFORM     The platform to build.
+    SUITE        The test suite to run.
+    TESTFILTER   Tests to run in a suite.
+    V            Set to 1 to enable verbose build. Default is 0.
+
+Release Targets:
+    publish      Build and publish final releasable artifacts
+    promote      Promote a release to a release channel
+    tag          Tag a release
+
+Release Options:
+    VERSION      The version information for binaries and releases.
+    CHANNEL      Sets the release channel. Can be set to master, main, alpha, beta, or stable.
+ +endef +export HELPTEXT + +help-special: ; @: + +help: + @echo "$$HELPTEXT" + @$(MAKE) help-special + +.PHONY: help help-special diff --git a/build/makelib/controlplane.mk b/build/makelib/controlplane.mk new file mode 100644 index 0000000..d9543bf --- /dev/null +++ b/build/makelib/controlplane.mk @@ -0,0 +1,39 @@ +# Copyright 2022 The Upbound Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +KIND_CLUSTER_NAME ?= local-dev +CROSSPLANE_NAMESPACE ?= upbound-system + +CONTROLPLANE_DUMP_DIRECTORY ?= $(OUTPUT_DIR)/controlplane-dump + +controlplane.up: $(UP) $(KUBECTL) $(KIND) + @$(INFO) setting up controlplane + @$(KIND) get kubeconfig --name $(KIND_CLUSTER_NAME) >/dev/null 2>&1 || $(KIND) create cluster --name=$(KIND_CLUSTER_NAME) +ifndef CROSSPLANE_ARGS + @$(INFO) setting up crossplane core without args + @$(KUBECTL) -n $(CROSSPLANE_NAMESPACE) get cm universal-crossplane-config >/dev/null 2>&1 || $(UP) uxp install --namespace=$(CROSSPLANE_NAMESPACE) +else + @$(INFO) setting up crossplane core with args @$(CROSSPLANE_ARGS) + @$(KUBECTL) -n $(CROSSPLANE_NAMESPACE) get cm universal-crossplane-config >/dev/null 2>&1 || $(UP) uxp install --namespace=$(CROSSPLANE_NAMESPACE) --set "args={${CROSSPLANE_ARGS}}" +endif +controlplane.down: $(UP) $(KUBECTL) $(KIND) + @$(INFO) deleting controlplane + @$(KIND) delete cluster --name=$(KIND_CLUSTER_NAME) + @$(OK) deleting controlplane + +controlplane.dump: $(KUBECTL) + mkdir -p $(CONTROLPLANE_DUMP_DIRECTORY) + @$(KUBECTL) cluster-info dump --output-directory $(CONTROLPLANE_DUMP_DIRECTORY) --all-namespaces || true + @$(KUBECTL) get crossplane --all-namespaces > $(CONTROLPLANE_DUMP_DIRECTORY)/all-crossplane.txt || true + @$(KUBECTL) get crossplane --all-namespaces -o yaml > $(CONTROLPLANE_DUMP_DIRECTORY)/all-crossplane.yaml || true \ No newline at end of file diff --git a/build/makelib/docs.mk b/build/makelib/docs.mk new file mode 100644 index 0000000..3d46edb --- /dev/null +++ b/build/makelib/docs.mk @@ -0,0 +1,74 @@ +# Copyright 2016 The Upbound Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# ==================================================================================== +# Options + +ifndef SOURCE_DOCS_DIR +$(error SOURCE_DOCS_DIR must be defined) +endif + +ifndef DEST_DOCS_DIR +$(error DEST_DOCS_DIR must be defined) +endif + +ifndef DOCS_GIT_REPO +$(error DOCS_GIT_REPO must be defined) +endif + +# Optional. If false the publish step will remove this version from the +# documentation repository. 
+DOCS_VERSION_ACTIVE ?= true
+
+DOCS_VERSION ?= $(shell echo "$(BRANCH_NAME)" | sed -E "s/^release\-([0-9]+)\.([0-9]+)$$/v\1.\2/g")
+DOCS_WORK_DIR := $(WORK_DIR)/docs-repo
+DOCS_VERSION_DIR := $(DOCS_WORK_DIR)/$(DEST_DOCS_DIR)/$(DOCS_VERSION)
+
+# ====================================================================================
+# Targets
+
+docs.init:
+	rm -rf $(DOCS_WORK_DIR)
+	mkdir -p $(DOCS_WORK_DIR)
+	git clone --depth=1 -b master $(DOCS_GIT_REPO) $(DOCS_WORK_DIR)
+
+docs.generate: docs.init
+	rm -rf $(DOCS_VERSION_DIR)
+	@if [ "$(DOCS_VERSION_ACTIVE)" == "true" ]; then \
+		$(INFO) Including version in documentation ; \
+		cp -r $(SOURCE_DOCS_DIR)/ $(DOCS_VERSION_DIR); \
+		$(OK) Version included in documentation ; \
+	fi
+
+docs.run: docs.init
+	@if [ "$(DOCS_VERSION_ACTIVE)" == "true" ]; then \
+		$(INFO) Including version in documentation ; \
+		ln -s $(ROOT_DIR)/$(SOURCE_DOCS_DIR) $(DOCS_VERSION_DIR); \
+		$(OK) Version included in documentation ; \
+	fi
+	cd $(DOCS_WORK_DIR) && DOCS_VERSION=$(DOCS_VERSION) $(MAKE) run
+
+docs.validate: docs.generate
+	cd $(DOCS_WORK_DIR) && DOCS_VERSION=$(DOCS_VERSION) $(MAKE) validate
+
+docs.publish: docs.generate
+	cd $(DOCS_WORK_DIR) && DOCS_VERSION=$(DOCS_VERSION) $(MAKE) publish
+
+# ====================================================================================
+# Common Targets
+
+# only publish docs for master and release branches
+ifneq ($(filter master release-%,$(BRANCH_NAME)),)
+publish.artifacts: docs.publish
+endif
diff --git a/build/makelib/golang.mk b/build/makelib/golang.mk
new file mode 100644
index 0000000..317874f
--- /dev/null
+++ b/build/makelib/golang.mk
@@ -0,0 +1,330 @@
+# Copyright 2016 The Upbound Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ====================================================================================
+# Options
+
+# Optional. The Go Binary to use
+GO ?= go
+
+# The go project including repo name, for example, github.com/rook/rook
+GO_PROJECT ?= $(PROJECT_REPO)
+
+# Optional. The subdirs in which we look for go files to test, vet, and fmt
+GO_SUBDIRS ?= cmd pkg
+
+# Optional. Additional subdirs used for integration or e2e testing
+GO_INTEGRATION_TESTS_SUBDIRS ?=
+
+# Optional build flags passed to go tools
+GO_BUILDFLAGS ?=
+GO_LDFLAGS ?=
+GO_TAGS ?=
+GO_TEST_FLAGS ?=
+GO_TEST_SUITE ?=
+GO_NOCOV ?=
+GO_COVER_MODE ?= count
+GO_CGO_ENABLED ?= 0
+
+# ====================================================================================
+# Setup go environment
+
+# turn on more verbose build when V=1
+ifeq ($(V),1)
+GO_LDFLAGS += -v -n
+GO_BUILDFLAGS += -x
+endif
+
+# whether to generate debug information in binaries. this includes DWARF and symbol tables.
+ifeq ($(DEBUG),0)
+GO_LDFLAGS += -s -w
+endif
+
+# set GOOS and GOARCH
+GOOS := $(OS)
+GOARCH := $(ARCH)
+export GOOS GOARCH
+
+# set GOHOSTOS and GOHOSTARCH
+GOHOSTOS := $(HOSTOS)
+GOHOSTARCH := $(TARGETARCH)
+
+GO_PACKAGES := $(foreach t,$(GO_SUBDIRS),$(GO_PROJECT)/$(t)/...)
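+# For illustration, with PROJECT_REPO set to github.com/upbound/myrepo (a
+# hypothetical value) and the default GO_SUBDIRS of "cmd pkg", the foreach
+# above expands GO_PACKAGES to:
+#
+#   github.com/upbound/myrepo/cmd/... github.com/upbound/myrepo/pkg/...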
+GO_INTEGRATION_TEST_PACKAGES := $(foreach t,$(GO_INTEGRATION_TESTS_SUBDIRS),$(GO_PROJECT)/$(t)/integration)
+
+ifneq ($(GO_TEST_PARALLEL),)
+GO_TEST_FLAGS += -p $(GO_TEST_PARALLEL)
+endif
+
+ifneq ($(GO_TEST_SUITE),)
+GO_TEST_FLAGS += -run '$(GO_TEST_SUITE)'
+endif
+
+GOPATH := $(shell $(GO) env GOPATH)
+
+# setup tools used during the build
+GOJUNIT := $(TOOLS_HOST_DIR)/go-junit-report
+GOJUNIT_VERSION ?= v2.0.0
+
+GOCOVER_COBERTURA := $(TOOLS_HOST_DIR)/gocover-cobertura
+# https://github.com/t-yuki/gocover-cobertura/commit/aaee18c8195c3f2d90e5ef80ca918d265463842a
+GOCOVER_COBERTURA_VERSION ?= aaee18c8195c3f2d90e5ef80ca918d265463842a
+
+GOIMPORTS := $(TOOLS_HOST_DIR)/goimports
+GOIMPORTS_VERSION ?= v0.1.12
+
+GOHOST := GOOS=$(GOHOSTOS) GOARCH=$(GOHOSTARCH) $(GO)
+GO_VERSION := $(shell $(GO) version | sed -ne 's/[^0-9]*\(\([0-9]\.\)\{0,4\}[0-9][^.]\).*/\1/p')
+
+# we use a consistent version of gofmt even while running different go compilers.
+# see https://github.com/golang/go/issues/26397 for more details
+GOFMT_VERSION := $(GO_VERSION)
+ifneq ($(findstring $(GOFMT_VERSION),$(GO_VERSION)),)
+GOFMT := $(shell which gofmt)
+else
+GOFMT := $(TOOLS_HOST_DIR)/gofmt$(GOFMT_VERSION)
+endif
+
+# We use a consistent version of golangci-lint to ensure everyone gets the same
+# linters.
+GOLANGCILINT_VERSION ?= 1.53.3
+GOLANGCILINT := $(TOOLS_HOST_DIR)/golangci-lint-v$(GOLANGCILINT_VERSION)
+
+GO_BIN_DIR := $(abspath $(OUTPUT_DIR)/bin)
+GO_OUT_DIR := $(GO_BIN_DIR)/$(PLATFORM)
+GO_TEST_DIR := $(abspath $(OUTPUT_DIR)/tests)
+GO_TEST_OUTPUT := $(GO_TEST_DIR)/$(PLATFORM)
+GO_LINT_DIR := $(abspath $(OUTPUT_DIR)/lint)
+GO_LINT_OUTPUT := $(GO_LINT_DIR)/$(PLATFORM)
+
+ifeq ($(GOOS),windows)
+GO_OUT_EXT := .exe
+endif
+
+ifeq ($(RUNNING_IN_CI),true)
+# Output checkstyle XML rather than human readable output.
+# The timeout is increased to 10m, to accommodate CI machines with low resources.
+GO_LINT_ARGS += --timeout 10m0s --out-format=checkstyle > $(GO_LINT_OUTPUT)/checkstyle.xml
+endif
+
+GO_COMMON_FLAGS = $(GO_BUILDFLAGS) -tags '$(GO_TAGS)' -trimpath
+GO_STATIC_FLAGS = $(GO_COMMON_FLAGS) -installsuffix static -ldflags '$(GO_LDFLAGS)'
+GO_GENERATE_FLAGS = $(GO_BUILDFLAGS) -tags 'generate $(GO_TAGS)'
+
+# ====================================================================================
+# Go Targets
+
+go.init: go.vendor.lite
+	@if [ "$(GO111MODULE)" != "on" ] && [ "$(realpath ../../../..)" != "$(realpath $(GOPATH))" ]; then \
+		$(WARN) the source directory is not relative to the GOPATH at $(GOPATH) or you are using symlinks. The build might run into issues.
Please move the source directory to be at $(GOPATH)/src/$(GO_PROJECT) ;\ + fi + +go.build: + @$(INFO) go build $(PLATFORM) + $(foreach p,$(GO_STATIC_PACKAGES),@CGO_ENABLED=0 $(GO) build -v -o $(GO_OUT_DIR)/$(lastword $(subst /, ,$(p)))$(GO_OUT_EXT) $(GO_STATIC_FLAGS) $(p) || $(FAIL) ${\n}) + $(foreach p,$(GO_TEST_PACKAGES) $(GO_LONGHAUL_TEST_PACKAGES),@CGO_ENABLED=0 $(GO) test -c -o $(GO_TEST_OUTPUT)/$(lastword $(subst /, ,$(p)))$(GO_OUT_EXT) $(GO_STATIC_FLAGS) $(p) || $(FAIL) ${\n}) + @$(OK) go build $(PLATFORM) + +go.install: + @$(INFO) go install $(PLATFORM) + $(foreach p,$(GO_STATIC_PACKAGES),@CGO_ENABLED=0 $(GO) install -v $(GO_STATIC_FLAGS) $(p) || $(FAIL) ${\n}) + @$(OK) go install $(PLATFORM) + +go.test.unit: $(GOJUNIT) $(GOCOVER_COBERTURA) + @$(INFO) go test unit-tests +ifeq ($(GO_NOCOV),true) + @$(WARN) coverage analysis is disabled + @CGO_ENABLED=0 $(GOHOST) test $(GO_TEST_FLAGS) $(GO_STATIC_FLAGS) $(GO_PACKAGES) || $(FAIL) +else + @mkdir -p $(GO_TEST_OUTPUT) + @CGO_ENABLED=$(GO_CGO_ENABLED) $(GOHOST) test -cover $(GO_STATIC_FLAGS) $(GO_PACKAGES) || $(FAIL) + @CGO_ENABLED=$(GO_CGO_ENABLED) $(GOHOST) test -v -covermode=$(GO_COVER_MODE) -coverprofile=$(GO_TEST_OUTPUT)/coverage.txt $(GO_TEST_FLAGS) $(GO_STATIC_FLAGS) $(GO_PACKAGES) 2>&1 | tee $(GO_TEST_OUTPUT)/unit-tests.log || $(FAIL) + @cat $(GO_TEST_OUTPUT)/unit-tests.log | $(GOJUNIT) -set-exit-code > $(GO_TEST_OUTPUT)/unit-tests.xml || $(FAIL) + @$(GOCOVER_COBERTURA) < $(GO_TEST_OUTPUT)/coverage.txt > $(GO_TEST_OUTPUT)/coverage.xml +endif + @$(OK) go test unit-tests + +# Depends on go.test.unit, but is only run in CI with a valid token after unit-testing is complete +# DO NOT run locally. +go.test.codecov: + @$(INFO) go test codecov + @cd $(GO_TEST_OUTPUT) && bash <(curl -s https://codecov.io/bash) || $(FAIL) + @$(OK) go test codecov + +go.test.integration: $(GOJUNIT) + @$(INFO) go test integration-tests + @mkdir -p $(GO_TEST_OUTPUT) || $(FAIL) + @CGO_ENABLED=0 $(GOHOST) test $(GO_STATIC_FLAGS) $(GO_INTEGRATION_TEST_PACKAGES) || $(FAIL) + @CGO_ENABLED=0 $(GOHOST) test $(GO_TEST_FLAGS) $(GO_STATIC_FLAGS) $(GO_INTEGRATION_TEST_PACKAGES) $(TEST_FILTER_PARAM) 2>&1 | tee $(GO_TEST_OUTPUT)/integration-tests.log || $(FAIL) + @cat $(GO_TEST_OUTPUT)/integration-tests.log | $(GOJUNIT) -set-exit-code > $(GO_TEST_OUTPUT)/integration-tests.xml || $(FAIL) + @$(OK) go test integration-tests + +go.lint: $(GOLANGCILINT) + @$(INFO) golangci-lint + @mkdir -p $(GO_LINT_OUTPUT) + @$(GOLANGCILINT) run $(GO_LINT_ARGS) || $(FAIL) + @$(OK) golangci-lint + +go.vet: + @$(INFO) go vet $(PLATFORM) + @CGO_ENABLED=0 $(GOHOST) vet $(GO_COMMON_FLAGS) $(GO_PACKAGES) $(GO_INTEGRATION_TEST_PACKAGES) || $(FAIL) + @$(OK) go vet $(PLATFORM) + +go.fmt: $(GOFMT) + @$(INFO) go fmt + @gofmt_out=$$($(GOFMT) -s -d -e $(GO_SUBDIRS) $(GO_INTEGRATION_TESTS_SUBDIRS) 2>&1) && [ -z "$${gofmt_out}" ] || (echo "$${gofmt_out}" 1>&2; $(FAIL)) + @$(OK) go fmt + +go.fmt.simplify: $(GOFMT) + @$(INFO) gofmt simplify + @$(GOFMT) -l -s -w $(GO_SUBDIRS) $(GO_INTEGRATION_TESTS_SUBDIRS) || $(FAIL) + @$(OK) gofmt simplify + +go.validate: go.modules.check go.vet go.fmt + +go.vendor.lite: go.modules.verify +go.vendor.check: go.modules.check +go.vendor.update: go.modules.update +go.vendor: go.modules.download + +go.modules.check: go.modules.tidy.check go.modules.verify + +go.modules.download: + @$(INFO) mod download + @$(GO) mod download || $(FAIL) + @$(OK) mod download + +go.modules.verify: + @$(INFO) verify go modules dependencies have expected content + @$(GO) mod verify || $(FAIL) + @$(OK) 
go modules dependencies verified + +go.modules.tidy: + @$(INFO) mod tidy + @$(GO) mod tidy + @$(OK) mod tidy + +go.modules.tidy.check: + @$(INFO) verify go modules dependencies are tidy + @$(GO) mod tidy + @changed=$$(git diff --exit-code --name-only go.mod go.sum 2>&1) && [ -z "$${changed}" ] || (echo "go.mod is not tidy. Please run 'make go.modules.tidy' and stage the changes" 1>&2; $(FAIL)) + @$(OK) go modules are tidy + +go.modules.update: + @$(INFO) update go modules + @$(GO) get -u ./... || $(FAIL) + @$(MAKE) go.modules.tidy + @$(MAKE) go.modules.verify + @$(OK) update go modules + +go.modules.clean: + @$(GO) clean -modcache + +go.clean: + @$(GO) clean -cache -testcache -modcache + @rm -fr $(GO_BIN_DIR) $(GO_TEST_DIR) + +go.generate: + @$(INFO) go generate $(PLATFORM) + @CGO_ENABLED=0 $(GOHOST) generate $(GO_GENERATE_FLAGS) $(GO_PACKAGES) $(GO_INTEGRATION_TEST_PACKAGES) || $(FAIL) + @$(OK) go generate $(PLATFORM) + @$(INFO) go mod tidy + @$(GOHOST) mod tidy || $(FAIL) + @$(OK) go mod tidy + +.PHONY: go.init go.build go.install go.test.unit go.test.integration go.test.codecov go.lint go.vet go.fmt go.generate +.PHONY: go.validate go.vendor.lite go.vendor go.vendor.check go.vendor.update go.clean +.PHONY: go.modules.check go.modules.download go.modules.verify go.modules.tidy go.modules.tidy.check go.modules.update go.modules.clean + +# ==================================================================================== +# Common Targets + +build.init: go.init +build.code.platform: go.build +clean: go.clean +distclean: go.distclean +lint.init: go.init +lint.run: go.lint +test.init: go.init +test.run: go.test.unit +generate.init: go.init +generate.run: go.generate + +# ==================================================================================== +# Special Targets + +fmt: go.imports +fmt.simplify: go.fmt.simplify +imports: go.imports +imports.fix: go.imports.fix +vendor: go.vendor +vendor.check: go.vendor.check +vendor.update: go.vendor.update +vet: go.vet + +define GO_HELPTEXT +Go Targets: + generate Runs go code generation followed by goimports on generated files. + fmt Checks go source code for formatting issues. + fmt.simplify Format, simplify, update source files. + imports Checks go source code import lines for issues. + imports.fix Updates go source files to fix issues with import lines. + vendor Updates vendor packages. + vendor.check Fail the build if vendor packages have changed. + vendor.update Update vendor dependencies. + vet Checks go source code and reports suspicious constructs. 
+ test.unit.nocov Runs unit tests without coverage (faster for iterative development) +endef +export GO_HELPTEXT + +go.help: + @echo "$$GO_HELPTEXT" + +help-special: go.help + +.PHONY: fmt vendor vet go.help + +# ==================================================================================== +# Tools install targets + +$(GOLANGCILINT): + @$(INFO) installing golangci-lint-v$(GOLANGCILINT_VERSION) $(SAFEHOSTPLATFORM) + @mkdir -p $(TOOLS_HOST_DIR)/tmp-golangci-lint || $(FAIL) + @curl -fsSL https://github.com/golangci/golangci-lint/releases/download/v$(GOLANGCILINT_VERSION)/golangci-lint-$(GOLANGCILINT_VERSION)-$(SAFEHOSTPLATFORM).tar.gz | tar -xz --strip-components=1 -C $(TOOLS_HOST_DIR)/tmp-golangci-lint || $(FAIL) + @mv $(TOOLS_HOST_DIR)/tmp-golangci-lint/golangci-lint $(GOLANGCILINT) || $(FAIL) + @rm -fr $(TOOLS_HOST_DIR)/tmp-golangci-lint + @$(OK) installing golangci-lint-v$(GOLANGCILINT_VERSION) $(SAFEHOSTPLATFORM) + +$(GOFMT): + @$(INFO) installing gofmt$(GOFMT_VERSION) + @mkdir -p $(TOOLS_HOST_DIR)/tmp-fmt || $(FAIL) + @curl -sL https://dl.google.com/go/go$(GOFMT_VERSION).$(SAFEHOSTPLATFORM).tar.gz | tar -xz -C $(TOOLS_HOST_DIR)/tmp-fmt || $(FAIL) + @mv $(TOOLS_HOST_DIR)/tmp-fmt/go/bin/gofmt $(GOFMT) || $(FAIL) + @rm -fr $(TOOLS_HOST_DIR)/tmp-fmt + @$(OK) installing gofmt$(GOFMT_VERSION) + +$(GOIMPORTS): + @$(INFO) installing goimports + @GOBIN=$(TOOLS_HOST_DIR) $(GOHOST) install golang.org/x/tools/cmd/goimports@$(GOIMPORTS_VERSION) || $(FAIL) + @$(OK) installing goimports + +$(GOJUNIT): + @$(INFO) installing go-junit-report + @GOBIN=$(TOOLS_HOST_DIR) $(GOHOST) install github.com/jstemmer/go-junit-report/v2@$(GOJUNIT_VERSION) || $(FAIL) + @$(OK) installing go-junit-report + +$(GOCOVER_COBERTURA): + @$(INFO) installing gocover-cobertura + @GOBIN=$(TOOLS_HOST_DIR) $(GOHOST) install github.com/t-yuki/gocover-cobertura@$(GOCOVER_COBERTURA_VERSION) || $(FAIL) + @$(OK) installing gocover-cobertura diff --git a/build/makelib/helm.mk b/build/makelib/helm.mk new file mode 100644 index 0000000..ade942e --- /dev/null +++ b/build/makelib/helm.mk @@ -0,0 +1,222 @@ +# Copyright 2016 The Upbound Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
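+
+# A minimal usage sketch (values are illustrative, not taken from this repo):
+# the including Makefile sets the required variables before this file, e.g.
+#
+#   HELM_CHARTS := my-chart
+#   HELM_BASE_URL := https://charts.example.com
+#   HELM_S3_BUCKET := example-helm-charts
+#   include build/makelib/helm.mk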
+ +ifeq ($(HELM_CHARTS),) +$(error the variable HELM_CHARTS must be set prior to including helm.mk) +endif + +# the base url where helm charts are published +ifeq ($(HELM_BASE_URL),) +$(error the variable HELM_BASE_URL must be set prior to including helm.mk) +endif + +# the s3 bucket where helm charts are published +ifeq ($(HELM_S3_BUCKET),) +$(error the variable HELM_S3_BUCKET must be set prior to including helm.mk) +endif + +# the charts directory +HELM_CHARTS_DIR ?= $(ROOT_DIR)/cluster/charts + +# the charts output directory +HELM_OUTPUT_DIR ?= $(OUTPUT_DIR)/charts + +# the helm index file +HELM_INDEX := $(HELM_OUTPUT_DIR)/index.yaml + +HELM_DOCS_VERSION ?= v1.11.0 +HELM_DOCS_ENABLED ?= false +HELM_DOCS := $(TOOLS_HOST_DIR)/helm-docs + +HELM_VALUES_TEMPLATE_SKIPPED ?= false + +HELM_CHART_LINT_STRICT ?= true +ifeq ($(HELM_CHART_LINT_STRICT),true) +HELM_CHART_LINT_STRICT_ARG += --strict +endif + +# helm home +HELM_HOME := $(abspath $(WORK_DIR)/helm) +export HELM_HOME + +# https://helm.sh/docs/faq/#xdg-base-directory-support +ifeq ($(USE_HELM3),true) +HELM_CACHE_HOME = $(HELM_HOME)/cache +HELM_CONFIG_HOME = $(HELM_HOME)/config +HELM_DATA_HOME = $(HELM_HOME)/data +export HELM_CACHE_HOME +export HELM_CONFIG_HOME +export HELM_DATA_HOME +endif + +# remove the leading `v` for helm chart versions +HELM_CHART_VERSION := $(VERSION:v%=%) + +#Chart Museum variables +#MUSEUM_URL ?= "https://helm.example.com/" - url for chart museum +#If the following variables are set HTTP basic auth will be used. More details https://github.com/helm/chartmuseum/blob/master/README.md#basic-auth +#MUSEUM_USER ?= "helm" +#MUSEUM_PASS ?= "changeme" + +# ==================================================================================== +# Helm Targets +$(HELM_HOME): $(HELM) + @mkdir -p $(HELM_HOME) + @if [ "$(USE_HELM3)" == "false" ]; then \ + $(HELM) init -c --stable-repo-url=https://charts.helm.sh/stable; \ + fi + +$(HELM_OUTPUT_DIR): + @mkdir -p $(HELM_OUTPUT_DIR) + +define helm.chart +$(HELM_OUTPUT_DIR)/$(1)-$(HELM_CHART_VERSION).tgz: $(HELM_HOME) $(HELM_OUTPUT_DIR) $(shell find $(HELM_CHARTS_DIR)/$(1) -type f) + @$(INFO) helm package $(1) $(HELM_CHART_VERSION) + @if [ "$(USE_HELM3)" == "false" ]; then \ + $(HELM) package --version $(HELM_CHART_VERSION) --app-version $(HELM_CHART_VERSION) --save=false -d $(HELM_OUTPUT_DIR) $(abspath $(HELM_CHARTS_DIR)/$(1)); \ + else \ + $(HELM) package --version $(HELM_CHART_VERSION) --app-version $(HELM_CHART_VERSION) -d $(HELM_OUTPUT_DIR) $(abspath $(HELM_CHARTS_DIR)/$(1)); \ + fi + @$(OK) helm package $(1) $(HELM_CHART_VERSION) + +helm.generate.$(1): $(HELM_HOME) $(HELM_DOCS) + @$(INFO) helm-docs $(1) +ifneq ($(HELM_DOCS_ENABLED),true) + @$(OK) helm docs not enabled [skipped] +else ifneq ($(HELM_VALUES_TEMPLATE_SKIPPED),true) + @$(WARN) helm-docs not supported with templated values.yaml [skipped] +else + @$(HELM_DOCS) + @$(OK) helm-docs $(1) +endif + +helm.generate: helm.generate.$(1) + +helm.prepare.$(1): $(HELM_HOME) + @$(INFO) helm prepare $(1) +ifeq ($(HELM_VALUES_TEMPLATE_SKIPPED),true) + @$(OK) HELM_VALUES_TEMPLATE_SKIPPED set to true [skipped] +else + @$(WARN) templating helm values.yaml for %%VERSION%% is deprecated, use appVersion and an empty tag instead. 
+	@cp -f $(HELM_CHARTS_DIR)/$(1)/values.yaml.tmpl $(HELM_CHARTS_DIR)/$(1)/values.yaml
+	@cd $(HELM_CHARTS_DIR)/$(1) && $(SED_CMD) 's|%%VERSION%%|$(VERSION)|g' values.yaml
+	@$(OK) helm prepare $(1)
+endif
+
+helm.prepare: helm.prepare.$(1)
+
+helm.lint.$(1): $(HELM_HOME) helm.prepare.$(1)
+	@rm -rf $(abspath $(HELM_CHARTS_DIR)/$(1)/charts)
+	@$(HELM) dependency update $(abspath $(HELM_CHARTS_DIR)/$(1))
+	@$(HELM) lint $(abspath $(HELM_CHARTS_DIR)/$(1)) $(HELM_CHART_LINT_ARGS_$(1)) $(HELM_CHART_LINT_STRICT_ARG)
+
+helm.lint: helm.lint.$(1)
+
+helm.dep.$(1): $(HELM_HOME)
+	@$(INFO) helm dep $(1) $(HELM_CHART_VERSION)
+	@$(HELM) dependency update $(abspath $(HELM_CHARTS_DIR)/$(1))
+	@$(OK) helm dep $(1) $(HELM_CHART_VERSION)
+
+helm.dep: helm.dep.$(1)
+
+$(HELM_INDEX): $(HELM_OUTPUT_DIR)/$(1)-$(HELM_CHART_VERSION).tgz
+endef
+$(foreach p,$(HELM_CHARTS),$(eval $(call helm.chart,$(p))))
+
+$(HELM_INDEX): $(HELM_HOME) $(HELM_OUTPUT_DIR)
+	@$(INFO) helm index
+	@$(HELM) repo index $(HELM_OUTPUT_DIR)
+	@$(OK) helm index
+
+helm.build: $(HELM_INDEX)
+
+helm.clean:
+	@rm -fr $(HELM_OUTPUT_DIR)
+
+helm.env: $(HELM)
+	@$(HELM) env
+
+# ====================================================================================
+# helm
+
+HELM_TEMP := $(shell mktemp -d)
+HELM_URL := $(HELM_BASE_URL)/$(CHANNEL)
+
+helm.promote: $(HELM_HOME)
+	@$(INFO) promoting helm charts
+# copy existing charts to a temp dir, then combine with new charts, reindex, and upload
+	@$(S3_SYNC) s3://$(HELM_S3_BUCKET)/$(CHANNEL) $(HELM_TEMP)
+	@if [ "$(S3_BUCKET)" != "" ]; then \
+		$(S3_SYNC) s3://$(S3_BUCKET)/build/$(BRANCH_NAME)/$(VERSION)/charts $(HELM_TEMP); \
+	fi
+	@$(HELM) repo index --url $(HELM_URL) $(HELM_TEMP)
+	@$(S3_SYNC_DEL) $(HELM_TEMP) s3://$(HELM_S3_BUCKET)/$(CHANNEL)
+# re-upload index.yaml setting cache-control to ensure the file is not cached by http clients
+	@$(S3_CP) --cache-control "private, max-age=0, no-transform" $(HELM_TEMP)/index.yaml s3://$(HELM_S3_BUCKET)/$(CHANNEL)/index.yaml
+	@rm -fr $(HELM_TEMP)
+	@$(OK) promoting helm charts
+
+define museum.upload
+helm.museum.$(1):
+ifdef MUSEUM_URL
+	@$(INFO) pushing helm charts $(1) to chart museum $(MUSEUM_URL)
+ifneq ($(MUSEUM_USER)$(MUSEUM_PASS),)
+	@$(INFO) curl -u $(MUSEUM_USER):$(MUSEUM_PASS) --data-binary '@$(HELM_OUTPUT_DIR)/$(1)-$(HELM_CHART_VERSION).tgz' $(MUSEUM_URL)/api/charts
+	@curl -u $(MUSEUM_USER):$(MUSEUM_PASS) --data-binary '@$(HELM_OUTPUT_DIR)/$(1)-$(HELM_CHART_VERSION).tgz' $(MUSEUM_URL)/api/charts
+else
+	@$(INFO) curl --data-binary '@$(HELM_OUTPUT_DIR)/$(1)-$(HELM_CHART_VERSION).tgz' $(MUSEUM_URL)/api/charts
+	@curl --data-binary '@$(HELM_OUTPUT_DIR)/$(1)-$(HELM_CHART_VERSION).tgz' $(MUSEUM_URL)/api/charts
+endif
+	@$(OK) pushing helm charts to chart museum
+endif
+
+helm.museum: helm.museum.$(1)
+endef
+$(foreach p,$(HELM_CHARTS),$(eval $(call museum.upload,$(p))))
+
+# ====================================================================================
+# Common Targets
+
+build.init: helm.prepare helm.lint
+build.check: helm.dep
+build.artifacts: helm.build
+clean: helm.clean
+lint: helm.lint
+promote.artifacts: helm.promote helm.museum
+generate.run: helm.generate
+
+# ====================================================================================
+# Special Targets
+
+dep: helm.dep
+
+define HELM_HELPTEXT
+Helm Targets:
+    dep        Update helm chart dependencies.
+
+endef
+export HELM_HELPTEXT
+
+helm.help:
+	@echo "$$HELM_HELPTEXT"
+
+help-special: helm.help
+
+# ====================================================================================
+# Tools install targets
+
+$(HELM_DOCS):
+	@$(INFO) installing helm-docs
+	@GOBIN=$(TOOLS_HOST_DIR) $(GOHOST) install github.com/norwoodj/helm-docs/cmd/helm-docs@$(HELM_DOCS_VERSION) || $(FAIL)
+	@$(OK) installing helm-docs
diff --git a/build/makelib/image.mk b/build/makelib/image.mk
new file mode 100644
index 0000000..1873647
--- /dev/null
+++ b/build/makelib/image.mk
@@ -0,0 +1,301 @@
+# Copyright 2016 The Upbound Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# DEPRECATED: this module has been replaced by imagelight.mk and may be removed
+# in the future.
+
+# ====================================================================================
+# Options
+
+ifeq ($(origin IMAGE_DIR),undefined)
+IMAGE_DIR := $(ROOT_DIR)/cluster/images
+endif
+
+ifeq ($(origin IMAGE_OUTPUT_DIR),undefined)
+IMAGE_OUTPUT_DIR := $(OUTPUT_DIR)/images/$(PLATFORM)
+endif
+
+ifeq ($(origin IMAGE_TEMP_DIR),undefined)
+IMAGE_TEMP_DIR := $(shell mktemp -d)
+endif
+
+# set the OS base image to alpine if not defined. set your own image for each
+# supported platform.
+ifeq ($(origin OSBASEIMAGE),undefined)
+OSBASE ?= alpine:3.13
+ifeq ($(ARCH),$(filter $(ARCH),amd64 ppc64le))
+OSBASEIMAGE = $(OSBASE)
+else ifeq ($(ARCH),arm64)
+OSBASEIMAGE = arm64v8/$(OSBASE)
+else
+$(error unsupported architecture $(ARCH))
+endif
+endif
+
+# shasum is not available on all systems. In that case, fall back to sha256sum.
+ifneq ($(shell type shasum 2>/dev/null),)
+SHA256SUM := shasum -a 256
+else
+SHA256SUM := sha256sum
+endif
+
+# a registry that is scoped to the current build tree on this host. this enables
+# us to have isolation between concurrent builds on the same system, as in the case
+# of multiple working directories or on a CI system with multiple executors. All images
+# tagged with this build registry can safely be untagged/removed at the end of the build.
+ifeq ($(origin BUILD_REGISTRY), undefined)
+BUILD_REGISTRY := build-$(shell echo $(HOSTNAME)-$(ROOT_DIR) | $(SHA256SUM) | cut -c1-8)
+endif
+
+MANIFEST_TOOL_VERSION=v1.0.3
+MANIFEST_TOOL := $(TOOLS_HOST_DIR)/manifest-tool-$(MANIFEST_TOOL_VERSION)
+
+# In order to reduce build time, especially on jenkins, we maintain a cache
+# of already built images. This cache contains images that can be used to help speed
+# future docker build commands using docker's content addressable schemes.
+# All cached images go in a 'cache/' local registry and we follow an MRU caching
+# policy -- keeping images that have been referenced around and evicting images
+# that have not been referenced in a while (and according to a policy). Note we can
+# not rely on the image's .CreatedAt date since docker only updates it when the
+# image is created and not when it is referenced. Instead we keep a date in the Tag.
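+# For example (illustrative name): an image cached on June 1st would be tagged
+# cache/provider-gitlab-amd64:2021-06-01.142500. img.cache below re-tags an
+# image with a fresh date whenever it is referenced again, and img.prune evicts
+# tags older than PRUNE_HOURS while always keeping the newest PRUNE_KEEP entries.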
+CACHE_REGISTRY := cache + +# prune images that are at least this many hours old +PRUNE_HOURS ?= 48 + +# prune keeps at least this many images regardless of how old they are +PRUNE_KEEP ?= 24 + +# don't actually prune just show what prune would do. +PRUNE_DRYRUN ?= 0 + +# the cached image format +CACHE_DATE_FORMAT := "%Y-%m-%d.%H%M%S" +CACHE_PRUNE_DATE := $(shell export TZ="UTC+$(PRUNE_HOURS)"; date +"$(CACHE_DATE_FORMAT)") +CACHE_TAG := $(shell date -u +"$(CACHE_DATE_FORMAT)") + +REGISTRIES ?= $(DOCKER_REGISTRY) +IMAGE_ARCHS := $(subst linux_,,$(filter linux_%,$(PLATFORMS))) +IMAGE_PLATFORMS := $(subst _,/,$(subst $(SPACE),$(COMMA),$(filter linux_%,$(PLATFORMS)))) + +# if set to 1 docker image caching will not be used. +CACHEBUST ?= 0 +ifeq ($(CACHEBUST),1) +BUILD_ARGS += --no-cache +endif + +# if V=0 avoid showing verbose output from docker build +ifeq ($(V),0) +BUILD_ARGS ?= -q +endif + +# if PULL=1 we will always check if there is a newer base image +PULL ?= 1 +ifeq ($(PULL),1) +BUILD_BASE_ARGS += --pull +endif +BUILD_BASE_ARGS += $(BUILD_ARGS) +export PULL + +# the version of tini to use +TINI_VERSION ?= v0.16.1 + +ifeq ($(HOSTOS),Linux) +SELF_CID := $(shell cat /proc/self/cgroup | grep docker | grep -o -E '[0-9a-f]{64}' | head -n 1) +endif + +# ===================================================================================== +# Image Targets + +do.img.clean: + @for i in $(CLEAN_IMAGES); do \ + if [ -n "$$(docker images -q $$i)" ]; then \ + for c in $$(docker ps -a -q --no-trunc --filter=ancestor=$$i); do \ + if [ "$$c" != "$(SELF_CID)" ]; then \ + echo stopping and removing container $${c} referencing image $$i; \ + docker stop $${c}; \ + docker rm $${c}; \ + fi; \ + done; \ + echo cleaning image $$i; \ + docker rmi $$i > /dev/null 2>&1 || true; \ + fi; \ + done + +# this will clean everything for this build +img.clean: + @$(INFO) cleaning images for $(BUILD_REGISTRY) + @$(MAKE) do.img.clean CLEAN_IMAGES="$(shell docker images | grep -E '^$(BUILD_REGISTRY)/' | awk '{print $$1":"$$2}')" + @$(OK) cleaning images for $(BUILD_REGISTRY) + +img.done: + @rm -fr $(IMAGE_TEMP_DIR) + +img.cache: + @for i in $(CACHE_IMAGES); do \ + IMGID=$$(docker images -q $$i); \ + if [ -n "$$IMGID" ]; then \ + echo === caching image $$i; \ + CACHE_IMAGE=$(CACHE_REGISTRY)/$${i#*/}; \ + docker tag $$i $${CACHE_IMAGE}:$(CACHE_TAG); \ + for r in $$(docker images --format "{{.ID}}#{{.Repository}}:{{.Tag}}" | grep $$IMGID | grep $(CACHE_REGISTRY)/ | grep -v $${CACHE_IMAGE}:$(CACHE_TAG)); do \ + docker rmi $${r#*#} > /dev/null 2>&1 || true; \ + done; \ + fi; \ + done + +# prune removes old cached images +img.prune: + @$(INFO) pruning images older than $(PRUNE_HOURS) keeping a minimum of $(PRUNE_KEEP) images + @EXPIRED=$$(docker images --format "{{.Tag}}#{{.Repository}}:{{.Tag}}" \ + | grep -E '$(CACHE_REGISTRY)/' \ + | sort -r \ + | awk -v i=0 -v cd="$(CACHE_PRUNE_DATE)" -F "#" '{if ($$1 <= cd && i >= $(PRUNE_KEEP)) print $$2; i++ }') &&\ + for i in $$EXPIRED; do \ + echo removing expired cache image $$i; \ + [ $(PRUNE_DRYRUN) = 1 ] || docker rmi $$i > /dev/null 2>&1 || true; \ + done + @for i in $$(docker images -q -f dangling=true); do \ + echo removing dangling image $$i; \ + docker rmi $$i > /dev/null 2>&1 || true; \ + done + @$(OK) pruning + +debug.nuke: + @for c in $$(docker ps -a -q --no-trunc); do \ + if [ "$$c" != "$(SELF_CID)" ]; then \ + echo stopping and removing container $${c}; \ + docker stop $${c}; \ + docker rm $${c}; \ + fi; \ + done + @for i in $$(docker images -q); do \ + echo removing image 
$$i; \
+		docker rmi -f $$i > /dev/null 2>&1; \
+	done
+
+# 1: registry 2: image, 3: arch
+define repo.targets
+img.release.build.$(1).$(2).$(3):
+	@$(INFO) docker build $(1)/$(2)-$(3):$(VERSION)
+	@docker tag $(BUILD_REGISTRY)/$(2)-$(3) $(1)/$(2)-$(3):$(VERSION) || $(FAIL)
+	@# Save image as _output/images/linux_<arch>/<image>.tar.gz (no builds for darwin or windows)
+	@mkdir -p $(OUTPUT_DIR)/images/linux_$(3) || $(FAIL)
+	@docker save $(BUILD_REGISTRY)/$(2)-$(3) | gzip -c > $(OUTPUT_DIR)/images/linux_$(3)/$(2).tar.gz || $(FAIL)
+	@$(OK) docker build $(1)/$(2)-$(3):$(VERSION)
+img.release.build: img.release.build.$(1).$(2).$(3)
+
+img.release.publish.$(1).$(2).$(3):
+	@$(INFO) docker push $(1)/$(2)-$(3):$(VERSION)
+	@docker push $(1)/$(2)-$(3):$(VERSION) || $(FAIL)
+	@$(OK) docker push $(1)/$(2)-$(3):$(VERSION)
+img.release.publish: img.release.publish.$(1).$(2).$(3)
+
+img.release.promote.$(1).$(2).$(3):
+	@$(INFO) docker promote $(1)/$(2)-$(3):$(VERSION) to $(1)/$(2)-$(3):$(CHANNEL)
+	@docker pull $(1)/$(2)-$(3):$(VERSION) || $(FAIL)
+	@[ "$(CHANNEL)" = "master" ] || docker tag $(1)/$(2)-$(3):$(VERSION) $(1)/$(2)-$(3):$(VERSION)-$(CHANNEL) || $(FAIL)
+	@docker tag $(1)/$(2)-$(3):$(VERSION) $(1)/$(2)-$(3):$(CHANNEL) || $(FAIL)
+	@[ "$(CHANNEL)" = "master" ] || docker push $(1)/$(2)-$(3):$(VERSION)-$(CHANNEL)
+	@docker push $(1)/$(2)-$(3):$(CHANNEL) || $(FAIL)
+	@$(OK) docker promote $(1)/$(2)-$(3):$(VERSION) to $(1)/$(2)-$(3):$(CHANNEL)
+img.release.promote: img.release.promote.$(1).$(2).$(3)
+
+img.release.clean.$(1).$(2).$(3):
+	@[ -z "$$$$(docker images -q $(1)/$(2)-$(3):$(VERSION))" ] || docker rmi $(1)/$(2)-$(3):$(VERSION)
+	@[ -z "$$$$(docker images -q $(1)/$(2)-$(3):$(VERSION)-$(CHANNEL))" ] || docker rmi $(1)/$(2)-$(3):$(VERSION)-$(CHANNEL)
+	@[ -z "$$$$(docker images -q $(1)/$(2)-$(3):$(CHANNEL))" ] || docker rmi $(1)/$(2)-$(3):$(CHANNEL)
+img.release.clean: img.release.clean.$(1).$(2).$(3)
+endef
+$(foreach r,$(REGISTRIES), $(foreach i,$(IMAGES), $(foreach a,$(IMAGE_ARCHS),$(eval $(call repo.targets,$(r),$(i),$(a))))))
+
+img.release.manifest.publish.%: img.release.publish $(MANIFEST_TOOL)
+	@$(MANIFEST_TOOL) push from-args --platforms $(IMAGE_PLATFORMS) --template $(DOCKER_REGISTRY)/$*-ARCH:$(VERSION) --target $(DOCKER_REGISTRY)/$*:$(VERSION) || $(FAIL)
+
+img.release.manifest.promote.%: img.release.promote $(MANIFEST_TOOL)
+	@[ "$(CHANNEL)" = "master" ] || $(MANIFEST_TOOL) push from-args --platforms $(IMAGE_PLATFORMS) --template $(DOCKER_REGISTRY)/$*-ARCH:$(VERSION) --target $(DOCKER_REGISTRY)/$*:$(VERSION)-$(CHANNEL) || $(FAIL)
+	@$(MANIFEST_TOOL) push from-args --platforms $(IMAGE_PLATFORMS) --template $(DOCKER_REGISTRY)/$*-ARCH:$(VERSION) --target $(DOCKER_REGISTRY)/$*:$(CHANNEL) || $(FAIL)
+
+# ====================================================================================
+# Common Targets
+
+# if IMAGES is defined then invoke and build each image identified
+ifneq ($(IMAGES),)
+
+ifeq ($(DOCKER_REGISTRY),)
+$(error the variable DOCKER_REGISTRY must be set prior to including image.mk)
+endif
+
+do.build.image.%: ; @$(MAKE) -C $(IMAGE_DIR)/$* PLATFORM=$(PLATFORM)
+do.build.images: $(foreach i,$(IMAGES), do.build.image.$(i)) ;
+build.artifacts.platform: do.build.images
+build.done: img.cache img.done
+clean: img.clean img.release.clean
+
+publish.init: img.release.build
+
+# only publish images for main / master and release branches
+# TODO(hasheddan): remove master and support overriding
+ifneq ($(filter main master release-%,$(BRANCH_NAME)),)
+publish.artifacts:
$(addprefix img.release.manifest.publish.,$(IMAGES)) +endif + +promote.artifacts: $(addprefix img.release.manifest.promote.,$(IMAGES)) + +else + +# otherwise we assume this .mk file is being included to build a single image + +build.artifacts.platform: img.build +build.done: img.cache img.done +clean: img.clean + +endif + +# ==================================================================================== +# Special Targets + +prune: img.prune + +define IMAGE_HELPTEXT +DEPRECATED: this module has been replaced by imagelight.mk and may be removed in the future. + +Image Targets: + prune Prune orphaned and cached images. + +Image Options: + PRUNE_HOURS The number of hours from when an image is last used for it to be + considered a target for pruning. Default is 48 hrs. + PRUNE_KEEP The minimum number of cached images to keep. Default is 24 images. + +endef +export IMAGE_HELPTEXT + +img.help: + @echo "$$IMAGE_HELPTEXT" + +help-special: img.help + +.PHONY: prune img.help + +# ==================================================================================== +# tools + +$(MANIFEST_TOOL): + @$(INFO) installing manifest-tool $(MANIFEST_TOOL_VERSION) + @mkdir -p $(TOOLS_HOST_DIR) || $(FAIL) + @curl -fsSL https://github.com/estesp/manifest-tool/releases/download/$(MANIFEST_TOOL_VERSION)/manifest-tool-$(HOSTOS)-$(SAFEHOSTARCH) > $@ || $(FAIL) + @chmod +x $@ || $(FAIL) + @$(OK) installing manifest-tool $(MANIFEST_TOOL_VERSION) diff --git a/build/makelib/imagelight.mk b/build/makelib/imagelight.mk new file mode 100644 index 0000000..c4d3557 --- /dev/null +++ b/build/makelib/imagelight.mk @@ -0,0 +1,133 @@ +# Copyright 2021 The Upbound Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# ==================================================================================== +# Options + +ifeq ($(origin IMAGE_DIR),undefined) +IMAGE_DIR := $(ROOT_DIR)/cluster/images +endif + +ifeq ($(origin IMAGE_OUTPUT_DIR),undefined) +IMAGE_OUTPUT_DIR := $(OUTPUT_DIR)/images/$(PLATFORM) +endif + +ifeq ($(origin IMAGE_TEMP_DIR),undefined) +IMAGE_TEMP_DIR := $(shell mktemp -d) +endif + +# we don't support darwin os images and instead strictly target linux +PLATFORM := $(subst darwin,linux,$(PLATFORM)) + +# shasum is not available on all systems. In that case, fall back to sha256sum. +ifneq ($(shell type shasum 2>/dev/null),) +SHA256SUM := shasum -a 256 +else +SHA256SUM := sha256sum +endif + +# a registry that is scoped to the current build tree on this host. this enables +# us to have isolation between concurrent builds on the same system, as in the case +# of multiple working directories or on a CI system with multiple executors. All images +# tagged with this build registry can safely be untagged/removed at the end of the build. 
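+# For example (illustrative): on a host named ci-runner-1 with ROOT_DIR
+# /home/ci/provider-gitlab this resolves to something like build-1a2b3c4d,
+# i.e. "build-" plus the first 8 hex characters of the sha256 of
+# "$(HOSTNAME)-$(ROOT_DIR)".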
+ifeq ($(origin BUILD_REGISTRY), undefined) +BUILD_REGISTRY := build-$(shell echo $(HOSTNAME)-$(ROOT_DIR) | $(SHA256SUM) | cut -c1-8) +endif + +REGISTRY_ORGS ?= docker.io +IMAGE_ARCHS := $(subst linux_,,$(filter linux_%,$(PLATFORMS))) +IMAGE_PLATFORMS := $(subst _,/,$(subst $(SPACE),$(COMMA),$(filter linux_%,$(PLATFORMS)))) +IMAGE_PLATFORMS_LIST := $(subst _,/,$(filter linux_%,$(PLATFORMS))) +IMAGE_PLATFORM := $(subst _,/,$(PLATFORM)) + +# if set to 1 docker image caching will not be used. +CACHEBUST ?= 0 +ifeq ($(CACHEBUST),1) +BUILD_ARGS += --no-cache +endif + +ifeq ($(HOSTOS),Linux) +SELF_CID := $(shell cat /proc/self/cgroup | grep docker | grep -o -E '[0-9a-f]{64}' | head -n 1) +endif + +# ===================================================================================== +# Image Targets + +do.img.clean: + @for i in $(CLEAN_IMAGES); do \ + if [ -n "$$(docker images -q $$i)" ]; then \ + for c in $$(docker ps -a -q --no-trunc --filter=ancestor=$$i); do \ + if [ "$$c" != "$(SELF_CID)" ]; then \ + echo stopping and removing container $${c} referencing image $$i; \ + docker stop $${c}; \ + docker rm $${c}; \ + fi; \ + done; \ + echo cleaning image $$i; \ + docker rmi $$i > /dev/null 2>&1 || true; \ + fi; \ + done + +# this will clean everything for this build +img.clean: + @$(INFO) cleaning images for $(BUILD_REGISTRY) + @$(MAKE) do.img.clean CLEAN_IMAGES="$(shell docker images | grep -E '^$(BUILD_REGISTRY)/' | awk '{print $$1":"$$2}')" + @$(OK) cleaning images for $(BUILD_REGISTRY) + +img.done: + @rm -fr $(IMAGE_TEMP_DIR) + +# 1: registry 2: image +define repo.targets +img.release.publish.$(1).$(2): + @$(MAKE) -C $(IMAGE_DIR)/$(2) IMAGE_PLATFORMS=$(IMAGE_PLATFORMS) IMAGE=$(1)/$(2):$(VERSION) img.publish +img.release.publish: img.release.publish.$(1).$(2) + +img.release.promote.$(1).$(2): + @$(MAKE) -C $(IMAGE_DIR)/$(2) TO_IMAGE=$(1)/$(2):$(CHANNEL) FROM_IMAGE=$(1)/$(2):$(VERSION) img.promote + @[ "$(CHANNEL)" = "master" ] || $(MAKE) -C $(IMAGE_DIR)/$(2) TO_IMAGE=$(1)/$(2):$(VERSION)-$(CHANNEL) FROM_IMAGE=$(1)/$(2):$(VERSION) img.promote +img.release.promote: img.release.promote.$(1).$(2) + +img.release.clean.$(1).$(2): + @[ -z "$$$$(docker images -q $(1)/$(2):$(VERSION))" ] || docker rmi $(1)/$(2):$(VERSION) + @[ -z "$$$$(docker images -q $(1)/$(2):$(VERSION)-$(CHANNEL))" ] || docker rmi $(1)/$(2):$(VERSION)-$(CHANNEL) + @[ -z "$$$$(docker images -q $(1)/$(2):$(CHANNEL))" ] || docker rmi $(1)/$(2):$(CHANNEL) +img.release.clean: img.release.clean.$(1).$(2) +endef +$(foreach r,$(REGISTRY_ORGS), $(foreach i,$(IMAGES),$(eval $(call repo.targets,$(r),$(i))))) + +# ==================================================================================== +# Common Targets + +do.build.image.%: + @$(MAKE) -C $(IMAGE_DIR)/$* IMAGE_PLATFORMS=$(IMAGE_PLATFORM) IMAGE=$(BUILD_REGISTRY)/$*-$(ARCH) img.build +do.build.images: $(foreach i,$(IMAGES), do.build.image.$(i)) +do.skip.images: + @$(OK) Skipping image build for unsupported platform $(IMAGE_PLATFORM) + +ifneq ($(filter $(IMAGE_PLATFORM),$(IMAGE_PLATFORMS_LIST)),) +build.artifacts.platform: do.build.images +else +build.artifacts.platform: do.skip.images +endif +build.done: img.done +clean: img.clean img.release.clean + +# only publish images for main / master and release branches +# TODO(hasheddan): remove master and support overriding +ifneq ($(filter main master release-%,$(BRANCH_NAME)),) +publish.artifacts: $(foreach r,$(REGISTRY_ORGS), $(foreach i,$(IMAGES),img.release.publish.$(r).$(i))) +endif + +promote.artifacts: $(foreach r,$(REGISTRY_ORGS), 
$(foreach i,$(IMAGES),img.release.promote.$(r).$(i))) diff --git a/build/makelib/k8s_tools.mk b/build/makelib/k8s_tools.mk new file mode 100644 index 0000000..d6ab5e0 --- /dev/null +++ b/build/makelib/k8s_tools.mk @@ -0,0 +1,169 @@ +# Copyright 2016 The Upbound Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# ==================================================================================== +# Options + +# the version of istio to use +ISTIO_VERSION ?= 1.12.9 +ISTIO := $(TOOLS_HOST_DIR)/istioctl-$(ISTIO_VERSION) +ISTIOOS := $(HOSTOS) +ISTIO_DOWNLOAD_TUPLE := $(SAFEHOSTPLATFORM) +ifeq ($(HOSTOS),darwin) +ISTIO_DOWNLOAD_TUPLE := osx-$(SAFEHOSTARCH) +endif + +# the version of kind to use +KIND_VERSION ?= v0.16.0 +KIND := $(TOOLS_HOST_DIR)/kind-$(KIND_VERSION) + +# the version of kubectl to use +KUBECTL_VERSION ?= v1.24.3 +KUBECTL := $(TOOLS_HOST_DIR)/kubectl-$(KUBECTL_VERSION) + +# the version of kustomize to use +KUSTOMIZE_VERSION ?= v4.5.5 +KUSTOMIZE := $(TOOLS_HOST_DIR)/kustomize-$(KUSTOMIZE_VERSION) + +# the version of olm-bundle to use +OLMBUNDLE_VERSION ?= v0.5.2 +OLMBUNDLE := $(TOOLS_HOST_DIR)/olm-bundle-$(OLMBUNDLE_VERSION) + +# the version of up to use +UP_VERSION ?= v0.16.1 +UP_CHANNEL ?= stable +UP := $(TOOLS_HOST_DIR)/up-$(UP_VERSION) + +# the version of helm 3 to use +USE_HELM3 ?= false +HELM3_VERSION ?= v3.9.1 +HELM3 := $(TOOLS_HOST_DIR)/helm-$(HELM3_VERSION) + +# If we enable HELM3 we alias HELM to be HELM3 +ifeq ($(USE_HELM3),true) +HELM_VERSION ?= $(HELM3_VERSION) +HELM := $(HELM3) +else +HELM_VERSION ?= v2.17.0 +HELM := $(TOOLS_HOST_DIR)/helm-$(HELM_VERSION) +endif + +# the version of kuttl to use +KUTTL_VERSION ?= 0.12.1 +KUTTL := $(TOOLS_HOST_DIR)/kuttl-$(KUTTL_VERSION) + +# the version of uptest to use +UPTEST_VERSION ?= v0.1.0 +UPTEST := $(TOOLS_HOST_DIR)/uptest-$(UPTEST_VERSION) +# ==================================================================================== +# Common Targets + +k8s_tools.buildvars: + @echo KIND=$(KIND) + @echo KUBECTL=$(KUBECTL) + @echo KUSTOMIZE=$(KUSTOMIZE) + @echo OLM_BUNDLE=$(OLM_BUNDLE) + @echo UP=$(UP) + @echo HELM=$(HELM) + @echo HELM3=$(HELM3) + @echo KUTTL=$(KUTTL) + +build.vars: k8s_tools.buildvars + +# ==================================================================================== +# tools + +# istio download and install +$(ISTIO): + @$(INFO) installing istio $(ISTIO_VERSION) + @mkdir -p $(TOOLS_HOST_DIR)/tmp-istio || $(FAIL) + @curl --progress-bar -fsSL https://github.com/istio/istio/releases/download/$(ISTIO_VERSION)/istio-$(ISTIO_VERSION)-$(ISTIO_DOWNLOAD_TUPLE).tar.gz | tar -xz -C $(TOOLS_HOST_DIR)/tmp-istio || $(FAIL) + @mv $(TOOLS_HOST_DIR)/tmp-istio/istio-$(ISTIO_VERSION)/bin/istioctl $(ISTIO) || $(FAIL) + @rm -fr $(TOOLS_HOST_DIR)/tmp-istio || $(FAIL) + @$(OK) $(ISTIO) installing istio $(ISTIO_VERSION) + +# kind download and install +$(KIND): + @$(INFO) installing kind $(KIND_VERSION) + @mkdir -p $(TOOLS_HOST_DIR) || $(FAIL) + @curl -fsSLo $(KIND) 
https://github.com/kubernetes-sigs/kind/releases/download/$(KIND_VERSION)/kind-$(SAFEHOSTPLATFORM) || $(FAIL) + @chmod +x $(KIND) + @$(OK) installing kind $(KIND_VERSION) + +# kubectl download and install +$(KUBECTL): + @$(INFO) installing kubectl $(KUBECTL_VERSION) + @curl -fsSLo $(KUBECTL) --create-dirs https://storage.googleapis.com/kubernetes-release/release/$(KUBECTL_VERSION)/bin/$(HOSTOS)/$(SAFEHOSTARCH)/kubectl || $(FAIL) + @chmod +x $(KUBECTL) + @$(OK) installing kubectl $(KUBECTL_VERSION) + +# kustomize download and install +$(KUSTOMIZE): + @$(INFO) installing kustomize $(KUSTOMIZE_VERSION) + @mkdir -p $(TOOLS_HOST_DIR)/tmp-kustomize + @curl -fsSL https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/$(KUSTOMIZE_VERSION)/kustomize_$(KUSTOMIZE_VERSION)_$(SAFEHOST_PLATFORM).tar.gz | tar -xz -C $(TOOLS_HOST_DIR)/tmp-kustomize + @mv $(TOOLS_HOST_DIR)/tmp-kustomize/kustomize $(KUSTOMIZE) + @rm -fr $(TOOLS_HOST_DIR)/tmp-kustomize + @$(OK) installing kustomize $(KUSTOMIZE_VERSION) + +# olm-bundle download and install +$(OLMBUNDLE): + @$(INFO) installing olm-bundle $(OLMBUNDLE_VERSION) + @curl -fsSLo $(OLMBUNDLE) https://github.com/upbound/olm-bundle/releases/download/$(OLMBUNDLE_VERSION)/olm-bundle_$(SAFEHOSTPLATFORM) || $(FAIL) + @chmod +x $(OLMBUNDLE) + @$(OK) installing olm-bundle $(OLMBUNDLE_VERSION) + +# up download and install +$(UP): + @$(INFO) installing up $(UP_VERSION) + @curl -fsSLo $(UP) --create-dirs https://cli.upbound.io/$(UP_CHANNEL)/$(UP_VERSION)/bin/$(SAFEHOST_PLATFORM)/up?source=build || $(FAIL) + @chmod +x $(UP) + @$(OK) installing up $(UP_VERSION) + +# helm download and install only if helm3 not enabled +ifeq ($(USE_HELM3),false) +$(HELM): + @$(INFO) installing helm $(HELM_VERSION) + @mkdir -p $(TOOLS_HOST_DIR)/tmp-helm + @curl -fsSL https://get.helm.sh/helm-$(HELM_VERSION)-$(SAFEHOSTPLATFORM).tar.gz | tar -xz -C $(TOOLS_HOST_DIR)/tmp-helm + @mv $(TOOLS_HOST_DIR)/tmp-helm/$(SAFEHOSTPLATFORM)/helm $(HELM) + @rm -fr $(TOOLS_HOST_DIR)/tmp-helm + @$(OK) installing helm $(HELM_VERSION) +endif + +# helm3 download and install +$(HELM3): + @$(INFO) installing helm3 $(HELM_VERSION) + @mkdir -p $(TOOLS_HOST_DIR)/tmp-helm3 + @curl -fsSL https://get.helm.sh/helm-$(HELM3_VERSION)-$(SAFEHOSTPLATFORM).tar.gz | tar -xz -C $(TOOLS_HOST_DIR)/tmp-helm3 + @mv $(TOOLS_HOST_DIR)/tmp-helm3/$(SAFEHOSTPLATFORM)/helm $(HELM3) + @rm -fr $(TOOLS_HOST_DIR)/tmp-helm3 + @$(OK) installing helm3 $(HELM_VERSION) + +# kuttl download and install +$(KUTTL): + @$(INFO) installing kuttl $(KUTTL_VERSION) + @mkdir -p $(TOOLS_HOST_DIR) + @curl -fsSLo $(KUTTL) --create-dirs https://github.com/kudobuilder/kuttl/releases/download/v$(KUTTL_VERSION)/kubectl-kuttl_$(KUTTL_VERSION)_$(HOST_PLATFORM) || $(FAIL) + @chmod +x $(KUTTL) + @$(OK) installing kuttl $(KUTTL_VERSION) + +# uptest download and install +$(UPTEST): + @$(INFO) installing uptest $(UPTEST) + @mkdir -p $(TOOLS_HOST_DIR) + @curl -fsSLo $(UPTEST) https://github.com/upbound/uptest/releases/download/$(UPTEST_VERSION)/uptest_$(SAFEHOSTPLATFORM) || $(FAIL) + @chmod +x $(UPTEST) + @$(OK) installing uptest $(UPTEST) + diff --git a/build/makelib/local.mk b/build/makelib/local.mk new file mode 100644 index 0000000..3fdbb32 --- /dev/null +++ b/build/makelib/local.mk @@ -0,0 +1,185 @@ +SELF_DIR := $(dir $(lastword $(MAKEFILE_LIST))) +SCRIPTS_DIR := $(SELF_DIR)/../scripts + +KIND_CLUSTER_NAME ?= local-dev +DEPLOY_LOCAL_DIR ?= $(ROOT_DIR)/cluster/local +DEPLOY_LOCAL_POSTRENDER_WORKDIR := $(WORK_DIR)/local/post-render +DEPLOY_LOCAL_WORKDIR := 
$(WORK_DIR)/local/localdev
+DEPLOY_LOCAL_CONFIG_DIR := $(DEPLOY_LOCAL_WORKDIR)/config
+DEPLOY_LOCAL_KUBECONFIG := $(DEPLOY_LOCAL_WORKDIR)/kubeconfig
+KIND_CONFIG_FILE := $(DEPLOY_LOCAL_WORKDIR)/kind.yaml
+KUBECONFIG ?= $(HOME)/.kube/config
+
+LOCALDEV_CLONE_WITH ?= ssh # or https
+LOCALDEV_LOCAL_BUILD ?= true
+LOCALDEV_PULL_LATEST ?= true
+
+# HELM_HOME is defined in makelib/helm.mk. However, it is not possible to include makelib/helm.mk
+# if the repo has no helm charts, because helm.mk fails with "the variable HELM_CHARTS must be set
+# prior to including helm.mk". We still want to use the local dev tooling even if the repo has no
+# helm charts (e.g. deploying existing charts from other repositories).
+ifndef HELM_HOME
+HELM_HOME := $(abspath $(WORK_DIR)/helm)
+XDG_DATA_HOME := $(HELM_HOME)
+XDG_CONFIG_HOME := $(HELM_HOME)
+XDG_CACHE_HOME := $(HELM_HOME)
+export XDG_DATA_HOME
+export XDG_CONFIG_HOME
+export XDG_CACHE_HOME
+$(HELM_HOME): $(HELM)
+	@mkdir -p $(HELM_HOME)
+endif
+
+export BUILD_REGISTRIES=$(REGISTRIES)
+ifndef REGISTRIES
+	# To work with imagelight.mk
+	export BUILD_REGISTRIES=$(REGISTRY_ORGS)
+endif
+
+export UP
+export KIND
+export KUBECTL
+export KUSTOMIZE
+export HELM
+export HELM3
+export USE_HELM3
+export GOMPLATE
+export ISTIO
+export ISTIO_VERSION
+export BUILD_REGISTRY
+export ROOT_DIR
+export SCRIPTS_DIR
+export KIND_CLUSTER_NAME
+export WORK_DIR
+export LOCALDEV_INTEGRATION_CONFIG_REPO
+export LOCAL_DEV_REPOS
+export LOCALDEV_CLONE_WITH
+export LOCALDEV_PULL_LATEST
+export DEPLOY_LOCAL_DIR
+export DEPLOY_LOCAL_POSTRENDER_WORKDIR
+export DEPLOY_LOCAL_WORKDIR
+export DEPLOY_LOCAL_CONFIG_DIR
+export DEPLOY_LOCAL_KUBECONFIG
+export KIND_CONFIG_FILE
+export KUBECONFIG
+export LOCALDEV_LOCAL_BUILD
+export HELM_OUTPUT_DIR
+export BUILD_HELM_CHART_VERSION=$(HELM_CHART_VERSION)
+export BUILD_HELM_CHARTS_LIST=$(HELM_CHARTS)
+export BUILD_IMAGES=$(IMAGES)
+export BUILD_IMAGE_ARCHS=$(subst linux_,,$(filter linux_%,$(BUILD_PLATFORMS)))
+export TARGETARCH
+
+# Install gomplate
+GOMPLATE_VERSION := 3.11.1
+GOMPLATE := $(TOOLS_HOST_DIR)/gomplate-$(GOMPLATE_VERSION)
+
+gomplate.buildvars:
+	@echo GOMPLATE=$(GOMPLATE)
+
+build.vars: gomplate.buildvars
+
+$(GOMPLATE):
+	@$(INFO) installing gomplate $(SAFEHOSTPLATFORM)
+	@curl -fsSLo $(GOMPLATE) https://github.com/hairyhenderson/gomplate/releases/download/v$(GOMPLATE_VERSION)/gomplate_$(SAFEHOSTPLATFORM) || $(FAIL)
+	@chmod +x $(GOMPLATE)
+	@$(OK) installing gomplate $(SAFEHOSTPLATFORM)
+
+kind.up: $(KIND)
+	@$(INFO) kind up
+	@$(KIND) get kubeconfig --name $(KIND_CLUSTER_NAME) >/dev/null 2>&1 || $(KIND) create cluster --name=$(KIND_CLUSTER_NAME) --config="$(KIND_CONFIG_FILE)" --kubeconfig="$(KUBECONFIG)"
+	@$(KIND) get kubeconfig --name $(KIND_CLUSTER_NAME) > $(DEPLOY_LOCAL_KUBECONFIG)
+	@$(OK) kind up
+
+kind.down: $(KIND)
+	@$(INFO) kind down
+	@$(KIND) delete cluster --name=$(KIND_CLUSTER_NAME)
+	@$(OK) kind down
+
+kind.setcontext: $(KUBECTL) kind.up
+	@$(KUBECTL) --kubeconfig $(KUBECONFIG) config use-context kind-$(KIND_CLUSTER_NAME)
+
+kind.buildvars:
+	@echo DEPLOY_LOCAL_KUBECONFIG=$(DEPLOY_LOCAL_KUBECONFIG)
+
+build.vars: kind.buildvars
+
+.PHONY: kind.up kind.down kind.setcontext kind.buildvars
+
+local.helminit: $(KUBECTL) $(HELM) kind.setcontext
+	@$(INFO) helm init
+	@docker pull gcr.io/kubernetes-helm/tiller:$(HELM_VERSION)
+	@$(KIND) load docker-image gcr.io/kubernetes-helm/tiller:$(HELM_VERSION) --name=$(KIND_CLUSTER_NAME)
+	@$(KUBECTL) --kubeconfig $(KUBECONFIG) --namespace kube-system get serviceaccount tiller > /dev/null 2>&1 ||
$(KUBECTL) --kubeconfig $(KUBECONFIG) --namespace kube-system create serviceaccount tiller + @$(KUBECTL) --kubeconfig $(KUBECONFIG) get clusterrolebinding tiller-cluster-rule > /dev/null 2>&1 || $(KUBECTL) --kubeconfig $(KUBECONFIG) create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller + @$(HELM) ls > /dev/null 2>&1 || $(HELM) init --kubeconfig $(KUBECONFIG) --service-account tiller --upgrade --wait + @$(HELM) repo update + @$(OK) helm init + +-include $(DEPLOY_LOCAL_WORKDIR)/config.mk + +local.prepare: + @$(INFO) preparing local dev workdir + @$(SCRIPTS_DIR)/localdev-prepare.sh || $(FAIL) + @$(OK) preparing local dev workdir + +local.clean: + @$(INFO) cleaning local dev workdir + @rm -rf $(WORK_DIR)/local || $(FAIL) + @$(OK) cleaning local dev workdir + +ifeq ($(USE_HELM3),true) +local.up: local.prepare kind.up $(HELM_HOME) +else +local.up: local.prepare kind.up local.helminit +endif + +local.down: kind.down local.clean + +local.deploy.%: local.prepare $(KUBECTL) $(KUSTOMIZE) $(HELM3) $(HELM_HOME) $(GOMPLATE) kind.setcontext + @$(INFO) localdev deploy component: $* + @$(eval PLATFORMS=$(BUILD_PLATFORMS)) + @$(SCRIPTS_DIR)/localdev-deploy-component.sh $* || $(FAIL) + @$(OK) localdev deploy component: $* + +local.remove.%: local.prepare $(KUBECTL) $(HELM3) $(HELM_HOME) $(GOMPLATE) kind.setcontext + @$(INFO) localdev remove component: $* + @$(SCRIPTS_DIR)/localdev-remove-component.sh $* || $(FAIL) + @$(OK) localdev remove component: $* + +local.scaffold: + @$(INFO) localdev scaffold config + @$(SCRIPTS_DIR)/localdev-scaffold.sh || $(FAIL) + @$(OK) localdev scaffold config + +.PHONY: local.helminit local.up local.deploy.% local.remove.% local.scaffold + +# ==================================================================================== +# Special Targets + +fmt: go.imports +fmt.simplify: go.fmt.simplify +imports: go.imports +imports.fix: go.imports.fix +vendor: go.vendor +vendor.check: go.vendor.check +vendor.update: go.vendor.update +vet: go.vet + +define LOCAL_HELPTEXT +Local Targets: + local.scaffold scaffold a local development configuration + local.up stand up of a local development cluster with kind + local.down tear down local development cluster + local.deploy.% install/upgrade a local/external component, for example, local.deploy.crossplane + local.remove.% removes component, for example, local.remove.crossplane + +endef +export LOCAL_HELPTEXT + +local.help: + @echo "$$LOCAL_HELPTEXT" + +help-special: local.help + +### diff --git a/build/makelib/local.xpkg.mk b/build/makelib/local.xpkg.mk new file mode 100644 index 0000000..0df255d --- /dev/null +++ b/build/makelib/local.xpkg.mk @@ -0,0 +1,45 @@ +# Copyright 2022 The Upbound Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +KIND_CLUSTER_NAME ?= local-dev +CROSSPLANE_NAMESPACE ?= upbound-system + +local.xpkg.init: $(KUBECTL) + @$(INFO) patching Crossplane with dev sidecar + @if ! 
$(KUBECTL) -n $(CROSSPLANE_NAMESPACE) get deployment crossplane -o jsonpath="{.spec.template.spec.containers[*].name}" | grep "dev" > /dev/null; then \ + $(KUBECTL) -n $(CROSSPLANE_NAMESPACE) patch deployment/crossplane --type='json' -p='[{"op":"add","path":"/spec/template/spec/containers/1","value":{"image":"alpine","name":"dev","command":["sleep","infinity"],"volumeMounts":[{"mountPath":"/tmp/cache","name":"package-cache"}]}},{"op":"add","path":"/spec/template/metadata/labels/patched","value":"true"}]'; \ + $(KUBECTL) -n $(CROSSPLANE_NAMESPACE) wait deploy crossplane --for condition=Available --timeout=60s; \ + $(KUBECTL) -n $(CROSSPLANE_NAMESPACE) wait pods -l app=crossplane,patched=true --for condition=Ready --timeout=60s; \ + fi + @$(OK) patching Crossplane with dev sidecar + +local.xpkg.sync: local.xpkg.init $(UP) + @$(INFO) copying local xpkg cache to Crossplane pod + @mkdir -p $(XPKG_OUTPUT_DIR)/cache + @for pkg in $(XPKG_OUTPUT_DIR)/linux_*/*; do $(UP) xpkg xp-extract --from-xpkg $$pkg -o $(XPKG_OUTPUT_DIR)/cache/$$(basename $$pkg .xpkg).gz; done + @XPPOD=$$($(KUBECTL) -n $(CROSSPLANE_NAMESPACE) get pod -l app=crossplane,patched=true -o jsonpath="{.items[0].metadata.name}"); \ + $(KUBECTL) -n $(CROSSPLANE_NAMESPACE) cp $(XPKG_OUTPUT_DIR)/cache -c dev $$XPPOD:/tmp + @$(OK) copying local xpkg cache to Crossplane pod + +local.xpkg.deploy.configuration.%: local.xpkg.sync + @$(INFO) deploying configuration package $* $(VERSION) + @echo '{"apiVersion":"pkg.crossplane.io/v1","kind":"Configuration","metadata":{"name":"$*"},"spec":{"package":"$*-$(VERSION).gz","packagePullPolicy":"Never"}}' | $(KUBECTL) apply -f - + @$(OK) deploying configuration package $* $(VERSION) + +local.xpkg.deploy.provider.%: $(KIND) local.xpkg.sync + @$(INFO) deploying provider package $* $(VERSION) + @$(KIND) load docker-image $(BUILD_REGISTRY)/$*-$(ARCH) -n $(KIND_CLUSTER_NAME) + @echo '{"apiVersion":"pkg.crossplane.io/v1alpha1","kind":"ControllerConfig","metadata":{"name":"config"},"spec":{"args":["-d"],"image":"$(BUILD_REGISTRY)/$*-$(ARCH)"}}' | $(KUBECTL) apply -f - + @echo '{"apiVersion":"pkg.crossplane.io/v1","kind":"Provider","metadata":{"name":"$*"},"spec":{"package":"$*-$(VERSION).gz","packagePullPolicy":"Never","controllerConfigRef":{"name":"config"}}}' | $(KUBECTL) apply -f - + @$(OK) deploying provider package $* $(VERSION) diff --git a/build/makelib/nodejs.mk b/build/makelib/nodejs.mk new file mode 100644 index 0000000..6c30c6d --- /dev/null +++ b/build/makelib/nodejs.mk @@ -0,0 +1,94 @@ +# Copyright 2016 The Upbound Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
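+
+# This file wires an Angular/npm project into the shared targets of the common
+# build system (build.code, lint, test.run, and e2e.run below); a repo that
+# ships a web UI would include it alongside the Go tooling.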
+ +# ==================================================================================== +# Options + +SELF_DIR := $(dir $(lastword $(MAKEFILE_LIST))) + +NPM := npm +NPM_MODULE_DIR := $(SELF_DIR)/../../node_modules +NPM_PACKAGE_FILE := $(SELF_DIR)/../../package.json +NPM_PACKAGE_LOCK_FILE := $(SELF_DIR)/../../package-lock.json + +NG := $(NPM) run ng -- + +# TODO: link this to overall TTY support +ifneq ($(origin NG_NO_PROGRESS), undefined) +NG_PROGRESS_ARG ?= --progress=false +npm_config_progress = false +export npm_config_progress +endif + +NG_KARMA_CONFIG ?= karma.ci.conf.js + +NG_OUTDIR ?= $(OUTPUT_DIR)/angular +export NG_OUTDIR + +# ==================================================================================== +# NPM Targets + +# some node packages like node-sass require platform/arch specific install. we need +# to run npm install for each platform. As a result we track a stamp file per host +NPM_INSTALL_STAMP := $(NPM_MODULE_DIR)/npm.install.$(HOST_PLATFORM).stamp + +# only run "npm install" if the package.json has changed +$(NPM_INSTALL_STAMP): $(NPM_PACKAGE_FILE) $(NPM_PACKAGE_LOCK_FILE) + @echo === npm install $(HOST_PLATFORM) + @$(NPM) install --no-save +# rebuild node-sass since it has platform dependent bits + @[ ! -d "$(NPM_MODULE_DIR)/node-sass" ] || $(NPM) rebuild node-sass + @touch $(NPM_INSTALL_STAMP) + +npm.install: $(NPM_INSTALL_STAMP) + +.PHONY: npm.install + +# ==================================================================================== +# Angular Project Targets + +ng.build: npm.install + @echo === ng build $(PLATFORM) + @$(NG) build --prod $(NG_PROGRESS_ARG) + +ng.lint: npm.install + @echo === ng lint + @$(NG) lint + +ng.test: npm.install + @echo === ng test + @$(NG) test $(NG_PROGRESS_ARG) --code-coverage --karma-config $(NG_KARMA_CONFIG) + +ng.test-integration: npm.install + @echo === ng e2e + @$(NG) e2e + +ng.clean: + @: + +ng.distclean: + @rm -fr $(NPM_MODULE_DIR) + +.PHONY: ng.build ng.lint ng.test ng.test-integration ng.clean ng.distclean + +# ==================================================================================== +# Common Targets + +build.code: ng.build +clean: ng.clean +distclean: ng.distclean +lint: ng.lint +test.run: ng.test +e2e.run: ng.test + diff --git a/build/makelib/output.mk b/build/makelib/output.mk new file mode 100644 index 0000000..618ae61 --- /dev/null +++ b/build/makelib/output.mk @@ -0,0 +1,76 @@ +# Copyright 2016 The Upbound Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
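+
+# A sketch of the flow implemented below (bucket name is illustrative): with
+# S3_BUCKET=example-bucket, BRANCH_NAME=main, and VERSION=v0.1.0, output.publish
+# syncs $(OUTPUT_DIR) to s3://example-bucket/build/main/v0.1.0, and
+# output.promote copies that prefix to s3://example-bucket/$(CHANNEL)/v0.1.0 and
+# s3://example-bucket/$(CHANNEL)/current.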
+
+# ====================================================================================
+# Options
+
+ifeq ($(VERSION),)
+$(error the VERSION variable must be set before including output.mk)
+endif
+
+ifeq ($(OUTPUT_DIR),)
+$(error the OUTPUT_DIR variable must be set before including output.mk)
+endif
+
+S3_CP := aws s3 cp --only-show-errors
+S3_SYNC := aws s3 sync --only-show-errors
+S3_SYNC_DEL := aws s3 sync --only-show-errors --delete
+
+# ====================================================================================
+# Targets
+
+output.init:
+	@mkdir -p $(OUTPUT_DIR)
+	@echo "$(VERSION)" > $(OUTPUT_DIR)/version
+
+output.clean:
+	@rm -fr $(OUTPUT_DIR)
+
+# if S3_BUCKET is set, add targets for publishing and promoting artifacts
+ifeq ($(S3_BUCKET),)
+# publishing outputs to an s3 bucket is skipped since 'S3_BUCKET' is not set
+else
+
+ifeq ($(CHANNEL),)
+$(error the CHANNEL variable must be set for publishing to the given S3_BUCKET)
+endif
+
+ifeq ($(BRANCH_NAME),)
+$(error the BRANCH_NAME variable must be set for publishing to the given S3_BUCKET)
+endif
+
+output.publish:
+	@$(INFO) publishing outputs to s3://$(S3_BUCKET)/build/$(BRANCH_NAME)/$(VERSION)
+	@$(S3_SYNC_DEL) $(OUTPUT_DIR) s3://$(S3_BUCKET)/build/$(BRANCH_NAME)/$(VERSION) || $(FAIL)
+	@$(OK) publishing outputs to s3://$(S3_BUCKET)/build/$(BRANCH_NAME)/$(VERSION)
+
+output.promote:
+	@$(INFO) promoting s3://$(S3_BUCKET)/$(CHANNEL)/$(VERSION)
+	@$(S3_SYNC_DEL) s3://$(S3_BUCKET)/build/$(BRANCH_NAME)/$(VERSION) s3://$(S3_BUCKET)/$(CHANNEL)/$(VERSION) || $(FAIL)
+	@$(S3_SYNC_DEL) s3://$(S3_BUCKET)/build/$(BRANCH_NAME)/$(VERSION) s3://$(S3_BUCKET)/$(CHANNEL)/current || $(FAIL)
+	@$(OK) promoting s3://$(S3_BUCKET)/$(CHANNEL)/$(VERSION)
+
+publish.artifacts: output.publish
+promote.artifacts: output.promote
+
+endif
+
+# ====================================================================================
+# Common Targets
+
+build.init: output.init
+build.clean: output.clean
diff --git a/build/makelib/xpkg.mk b/build/makelib/xpkg.mk
new file mode 100644
index 0000000..b8d19f4
--- /dev/null
+++ b/build/makelib/xpkg.mk
@@ -0,0 +1,131 @@
+# Copyright 2022 The Upbound Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ====================================================================================
+# Options
+
+ifeq ($(origin XPKG_DIR),undefined)
+XPKG_DIR := $(ROOT_DIR)/package
+endif
+
+# xref https://github.com/upbound/provider-aws/pull/647, https://github.com/upbound/up/pull/309
+# in up v0.16.0, support for ProviderConfig documentation via object annotations was added.
+# by convention, we will assume the extensions file resides in the package directory.
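+# For example (illustrative layout): a provider repo keeps package/crossplane.yaml
+# and package/auth.yaml side by side, and the auth file is passed to `up xpkg
+# build` via the --auth-ext flag used below.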
+ifeq ($(origin XPKG_AUTH_EXT),undefined) +XPKG_AUTH_EXT := $(XPKG_DIR)/auth.yaml +endif + +ifeq ($(origin XPKG_EXAMPLES_DIR),undefined) +XPKG_EXAMPLES_DIR := $(ROOT_DIR)/examples +endif + +ifeq ($(origin XPKG_IGNORE),undefined) +XPKG_IGNORE := '' +endif + +ifeq ($(origin XPKG_OUTPUT_DIR),undefined) +XPKG_OUTPUT_DIR := $(OUTPUT_DIR)/xpkg +endif + +# shasum is not available on all systems. In that case, fall back to sha256sum. +ifneq ($(shell type shasum 2>/dev/null),) +SHA256SUM := shasum -a 256 +else +SHA256SUM := sha256sum +endif + +# a registry that is scoped to the current build tree on this host. this enables +# us to have isolation between concurrent builds on the same system, as in the case +# of multiple working directories or on a CI system with multiple executors. All images +# tagged with this build registry can safely be untagged/removed at the end of the build. +ifeq ($(origin BUILD_REGISTRY), undefined) +BUILD_REGISTRY := build-$(shell echo $(HOSTNAME)-$(ROOT_DIR) | $(SHA256SUM) | cut -c1-8) +endif + +XPKG_REG_ORGS ?= xpkg.upbound.io/crossplane +XPKG_REG_ORGS_NO_PROMOTE ?= xpkg.upbound.io/crossplane +XPKG_LINUX_PLATFORMS := $(filter linux_%,$(PLATFORMS)) +XPKG_ARCHS := $(subst linux_,,$(filter linux_%,$(PLATFORMS))) +XPKG_PLATFORMS := $(subst _,/,$(subst $(SPACE),$(COMMA),$(filter linux_%,$(PLATFORMS)))) +XPKG_PLATFORMS_LIST := $(subst _,/,$(filter linux_%,$(PLATFORMS))) +XPKG_PLATFORM := $(subst _,/,$(PLATFORM)) + +UP ?= up + +# ===================================================================================== +# XPKG Targets + +# 1: xpkg +define xpkg.build.targets +xpkg.build.$(1): + @$(INFO) Building package $(1)-$(VERSION).xpkg for $(PLATFORM) + @mkdir -p $(OUTPUT_DIR)/xpkg/$(PLATFORM) + @controller_arg=$$$$(grep -E '^kind:\s+Provider\s*$$$$' $(XPKG_DIR)/crossplane.yaml > /dev/null && echo "--controller $(BUILD_REGISTRY)/$(1)-$(ARCH)"); \ + $(UP) xpkg build \ + $$$${controller_arg} \ + --package-root $(XPKG_DIR) \ + --auth-ext $(XPKG_AUTH_EXT) \ + --examples-root $(XPKG_EXAMPLES_DIR) \ + --ignore $(XPKG_IGNORE) \ + --output $(XPKG_OUTPUT_DIR)/$(PLATFORM)/$(1)-$(VERSION).xpkg || $(FAIL) + @$(OK) Built package $(1)-$(VERSION).xpkg for $(PLATFORM) +xpkg.build: xpkg.build.$(1) +endef +$(foreach x,$(XPKGS),$(eval $(call xpkg.build.targets,$(x)))) + +# 1: registry/org 2: repo +define xpkg.release.targets +xpkg.release.publish.$(1).$(2): + @$(INFO) Pushing package $(1)/$(2):$(VERSION) + @$(UP) xpkg push \ + $(foreach p,$(XPKG_LINUX_PLATFORMS),--package $(XPKG_OUTPUT_DIR)/$(p)/$(2)-$(VERSION).xpkg ) \ + $(1)/$(2):$(VERSION) || $(FAIL) + @$(OK) Pushed package $(1)/$(2):$(VERSION) +xpkg.release.publish: xpkg.release.publish.$(1).$(2) + +xpkg.release.promote.$(1).$(2): + @$(INFO) Promoting package from $(1)/$(2):$(VERSION) to $(1)/$(2):$(CHANNEL) + @docker buildx imagetools create -t $(1)/$(2):$(CHANNEL) $(1)/$(2):$(VERSION) + @[ "$(CHANNEL)" = "master" ] || docker buildx imagetools create -t $(1)/$(2):$(VERSION)-$(CHANNEL) $(1)/$(2):$(VERSION) + @$(OK) Promoted package from $(1)/$(2):$(VERSION) to $(1)/$(2):$(CHANNEL) +xpkg.release.promote: xpkg.release.promote.$(1).$(2) +endef +$(foreach r,$(XPKG_REG_ORGS), $(foreach x,$(XPKGS),$(eval $(call xpkg.release.targets,$(r),$(x))))) + +# ==================================================================================== +# Common Targets + +do.build.xpkgs: $(foreach i,$(XPKGS),xpkg.build.$(i)) +do.skip.xpkgs: + @$(OK) Skipping xpkg build for unsupported platform $(IMAGE_PLATFORM) + +ifneq ($(filter 
$(XPKG_PLATFORM),$(XPKG_PLATFORMS_LIST)),) +build.artifacts.platform: do.build.xpkgs +else +build.artifacts.platform: do.skip.xpkgs +endif + +# only publish package for main / master and release branches +# TODO(hasheddan): remove master and support overriding +ifneq ($(filter main master release-%,$(BRANCH_NAME)),) +publish.artifacts: $(foreach r,$(XPKG_REG_ORGS), $(foreach x,$(XPKGS),xpkg.release.publish.$(r).$(x))) +endif + +# NOTE(hasheddan): promotion fails using buildx imagetools create with some +# registries, so a NO_PROMOTE list is supported here. Additionally, channels may +# not be used on some registries that infer vanity tags. +# https://github.com/containerd/containerd/issues/5978 +# https://github.com/estesp/manifest-tool/issues/122 +# https://github.com/moby/buildkit/issues/2438 +promote.artifacts: $(foreach r,$(filter-out $(XPKG_REG_ORGS_NO_PROMOTE),$(XPKG_REG_ORGS)), $(foreach x,$(XPKGS),xpkg.release.promote.$(r).$(x))) diff --git a/build/makelib/yarnjs.mk b/build/makelib/yarnjs.mk new file mode 100644 index 0000000..648a599 --- /dev/null +++ b/build/makelib/yarnjs.mk @@ -0,0 +1,76 @@ +# Copyright 2016 The Upbound Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# ==================================================================================== +# Options + +SELF_DIR := $(dir $(lastword $(MAKEFILE_LIST))) + +YARN_DIR ?= $(SELF_DIR)/../.. + +YARN := yarn +YARN_MODULE_DIR := $(YARN_DIR)/node_modules +YARN_PACKAGE_FILE := $(YARN_DIR)/package.json +YARN_PACKAGE_LOCK_FILE := $(YARN_DIR)/yarn.lock + +YARN_OUTDIR ?= $(OUTPUT_DIR)/yarn +export YARN_OUTDIR + +# ==================================================================================== +# YARN Targets + +# some node packages like node-sass require platform/arch specific install. we need +# to run yarn for each platform. 
+# As a result we track a stamp file per host.
+YARN_INSTALL_STAMP := $(YARN_MODULE_DIR)/yarn.install.$(HOST_PLATFORM).stamp
+
+# only run "yarn" if the package.json has changed
+$(YARN_INSTALL_STAMP): $(YARN_PACKAGE_FILE) $(YARN_PACKAGE_LOCK_FILE)
+	@echo === yarn
+	@cd $(YARN_DIR); $(YARN) --frozen-lockfile --non-interactive
+	@touch $(YARN_INSTALL_STAMP)
+
+yarn.install: $(YARN_INSTALL_STAMP)
+
+.PHONY: yarn.install
+
+# ====================================================================================
+# Razzle Project Targets
+
+yarn.build: yarn.install
+	@echo === yarn build $(PLATFORM)
+	@cd $(YARN_DIR); $(YARN) build
+	@mkdir -p $(YARN_OUTDIR) && cp -a build $(YARN_OUTDIR)
+
+# NOTE: added so that the common "lint" target below resolves; assumes the
+# package.json defines a "lint" script.
+yarn.lint: yarn.install
+	@echo === yarn lint
+	@cd $(YARN_DIR); $(YARN) lint
+
+yarn.test: yarn.install
+	@echo === yarn test
+	@cd $(YARN_DIR); $(YARN) test-ci
+
+yarn.clean:
+	@rm -fr $(YARN_DIR)/build _output .work
+
+yarn.distclean:
+	@rm -fr $(YARN_MODULE_DIR) .cache
+
+.PHONY: yarn.build yarn.lint yarn.test yarn.test-integration yarn.clean yarn.distclean
+
+# ====================================================================================
+# Common Targets
+
+build.code: yarn.build
+clean: yarn.clean
+distclean: yarn.distclean
+lint: yarn.lint
+test.run: yarn.test
+e2e.run: yarn.test
+
diff --git a/build/reset b/build/reset
new file mode 100755
index 0000000..a5904ef
--- /dev/null
+++ b/build/reset
@@ -0,0 +1,30 @@
+#!/bin/bash -e
+
+# Copyright 2016 The Upbound Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+scriptdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "${scriptdir}/common.sh"
+
+# this should only clean up leaked containers during rsync
+for c in $(docker ps -a -q --filter=ancestor=${CROSS_IMAGE}); do
+    echo removing container ${c}
+    docker stop ${c}
+    docker rm ${c}
+done
+
+if [[ $(docker volume ls | grep ${CROSS_IMAGE_VOLUME}) ]]; then
+    echo removing volume ${CROSS_IMAGE_VOLUME}
+    docker volume rm ${CROSS_IMAGE_VOLUME}
+fi
diff --git a/build/run b/build/run
new file mode 100755
index 0000000..3de36fb
--- /dev/null
+++ b/build/run
@@ -0,0 +1,175 @@
+#!/bin/bash -e
+
+# Copyright 2016 The Upbound Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+scriptdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "${scriptdir}/common.sh"
+
+#if [ ! -z $DOCKER_HOST ]; then
+#    echo ERROR: we only support the case where docker is running locally for now.
+#    exit 1
+#fi
+
+# build the container if we've not done so already
+if [ "`uname -m`" != "x86_64" ]; then
+    echo ERROR: we only support the cross container build on amd64 hosts.
+    exit 1
+fi
+echo ==== building the cross container \(this could take minutes the first time\)
+make_cmd="make --no-print-directory -C ${scriptdir}/cross build PULL=0"
+make_output=$($make_cmd 2>&1) || {
+    cat <<EOF >&2
+=== cross build image failed for ${CROSS_IMAGE}
+${make_output}
+EOF
+    exit 1
+}
+
+# run the container with the same uid and gid as the user to avoid
+# permission issues when sharing volumes with the host
+USER_ARGS="-e BUILDER_UID=$( id -u ) -e BUILDER_GID=$( id -g )"
+BUILDER_HOME=/home/upbound
+
+# setup kubectl from the host if available
+if [ -d ${HOME}/.kube ]; then
+KUBE_ARGS="-v ${HOME}/.kube:${BUILDER_HOME}/.kube"
+fi
+
+if [ -x ${KUBEADM_DIND_DIR}/kubectl ]; then
+KUBEADM_DIND_ARGS="-v ${KUBEADM_DIND_DIR}/kubectl:/usr/bin/kubectl"
+fi
+
+if [ "`uname -s`" != "Linux" ]; then
+
+    # On non-linux hosts, it is EXTREMELY slow to mount the source
+    # tree inside the container. As such we use rsync to copy the source
+    # tree to a new docker volume on each run. The first rsync takes a
+    # little bit of time, but subsequent ones are fast. This approach
+    # is MUCH faster than using osxfs, NFS or other approaches.
+
+    # create a new volume to hold our go workspace. NOTE: while concurrent
+    # runs of the build container are supported they will share the same volume
+    # and we will be rsyncing to it at different times. This could lead to
+    # undefined behavior, but this should be a rare case on non-linux envs.
+    if [[ ! $(docker volume ls | grep ${CROSS_IMAGE_VOLUME}) ]]; then
+        echo ==== Creating docker volume "${CROSS_IMAGE_VOLUME}" and syncing sources
+        echo ==== for the first time. This could take a few seconds.
+        docker volume create --name ${CROSS_IMAGE_VOLUME} &> /dev/null
+    fi
+
+    # On non-linux the layout is as follows:
+    #
+    # /var/run/docker.sock (bind mounted to host /var/run/docker.sock)
+    # /${BUILDER_HOME} (bind mounted to an independent docker volume)
+    # /.netrc (bind mounted to host ${HOME}/.netrc)
+    # /.aws (bind mounted to host ${HOME}/.aws)
+    # /.docker (bind mounted to host ${HOME}/.docker)
+    # /go/src/github.com/upbound/api (rsync'd from host )
+
+    # now copy the source tree to the container volume. Note this also
+    # copies the .git directory but not the index.lock files, which
+    # change frequently and sometimes cause rsync to fail.
+    rsync_host_to_container \
+        --filter="- /.work/" \
+        --filter="- /.vscode/" \
+        --filter="- index.lock"
+
+    MOUNT_OPTS="${MOUNT_OPTS} -v ${CROSS_IMAGE_VOLUME}:${BUILDER_HOME}"
+
+else
+
+    # On linux bind mounts are cheap so we don't need to rsync.
+    # the layout is as follows:
+    #
+    # /var/run/docker.sock (bind mounted to host /var/run/docker.sock)
+    # /${BUILDER_HOME}
+    # /.netrc (bind mounted to host ${HOME}/.netrc)
+    # /.aws (bind mounted to host ${HOME}/.aws)
+    # /.docker (bind mounted to host ${HOME}/.docker)
+    # /go
+    # /pkg (bind mounted to /.work/cross_pkg)
+    # /src/github.com/upbound/api (bind mounted to )
+
+    MOUNT_OPTS="${MOUNT_OPTS} \
+        -v ${scriptdir}/../.work/cross_pkg:${BUILDER_HOME}/go/pkg
+        -v ${scriptdir}/..:${BUILDER_HOME}/go/src/${PROJECT_REPO}"
+fi
+
+# we copy credential files for github access
+[[ -f ${HOME}/.netrc ]] && NETRC_ARGS="-v ${HOME}/.netrc:${BUILDER_HOME}/.netrc"
+
+tty -s && TTY_ARGS=-ti || TTY_ARGS=
+
+# docker seems more reliable when running with net=host.
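+# For example (illustrative invocation, assuming the usual "build/run make ..."
+# entrypoint), host networking can be disabled with:
+#
+#   DISABLE_HOST_NETWORK=1 build/run make build
+#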
+if [ -z "${DISABLE_HOST_NETWORK}" ]; then
+    NET_ARGS="--net=host"
+fi
+
+# BUGBUG: new docker clients switch to using the OSX keychain. how do we copy creds?
+# sometimes we run docker inside docker. bind the docker config and socket
+if [ -z "${DISABLE_NESTED_DOCKER}" ]; then
+    DOCKER_NESTED_ARGS="\
+        -v ${HOME}/.docker:${BUILDER_HOME}/.docker \
+        -v /var/run/docker.sock:/var/run/docker.sock"
+fi
+
+# set AWS creds
+AWS_ARGS="\
+    -e AWS_ACCESS_KEY_ID \
+    -e AWS_SECRET_ACCESS_KEY \
+    -e AWS_DEFAULT_REGION \
+    -v ${HOME}/.aws:${BUILDER_HOME}/.aws"
+
+rsync_back() {
+    if [ "`uname -s`" != "Linux" ]; then
+        # rsync generated files back to the host. IMPORTANT: we never rsync the .git directory back
+        # since the host's version might have changed.
+        rsync_container_to_host \
+            --filter="+ /_output/" \
+            --filter="+ /.cache/" \
+            --filter="+ /vendor/" \
+            --filter="- /*"
+    fi
+}
+
+# set GOPRIVATE to skip go-proxy on upbound by default - https://tip.golang.org/cmd/go/#hdr-Module_configuration_for_non_public_modules
+GOPRIVATE="github.com/upbound/*"
+
+docker run \
+    --rm \
+    -h ${BUILD_HOST} \
+    -e BUILD_REGISTRY=${BUILD_REGISTRY} \
+    -e GOPATH="${BUILDER_HOME}/go" \
+    -e GITHUB_TOKEN \
+    -e VERSION \
+    -e CHANNEL \
+    -e RUNNING_IN_CI \
+    -e GOPRIVATE="${GOPRIVATE}" \
+    -v ${PWD}/_output:${BUILDER_HOME}/go/bin \
+    ${TTY_ARGS} \
+    ${KUBE_ARGS} \
+    ${KUBEADM_DIND_ARGS} \
+    ${DOWNLOAD_ARGS} \
+    ${NETRC_ARGS} \
+    ${AWS_ARGS} \
+    ${USER_ARGS} \
+    ${DOCKER_NESTED_ARGS} \
+    ${NET_ARGS} \
+    ${MOUNT_OPTS} \
+    -w ${BUILDER_HOME}/go/src/${PROJECT_REPO} \
+    ${CROSS_IMAGE} \
+    "$@" || { rsync_back; exit 1; }
+
+rsync_back
diff --git a/build/scripts/load-configs.sh b/build/scripts/load-configs.sh
new file mode 100644
index 0000000..79e2e35
--- /dev/null
+++ b/build/scripts/load-configs.sh
@@ -0,0 +1,46 @@
+COMPONENT=$1
+
+# REQUIRED_IMAGES is the array of images that the COMPONENT needs.
+# These images will be pulled (if they do not already exist) and loaded into the kind cluster before deployment.
+# If an image has a tag, that tag will be used.
+# If an image does not have a tag, "v${HELM_CHART_VERSION}" will be used as a tag.
+REQUIRED_IMAGES=()
+
+# HELM_CHART_NAME is the name of the helm chart to deploy. If not set, defaults to COMPONENT.
+HELM_CHART_NAME=""
+# HELM_CHART_VERSION is the version of the helm chart to deploy.
+# If LOCALDEV_LOCAL_BUILD=true, HELM_CHART_VERSION will be set to the version from the build system.
+# If LOCALDEV_LOCAL_BUILD=false, HELM_CHART_VERSION defaults to the latest version in the HELM_REPOSITORY.
+HELM_CHART_VERSION=""
+# HELM_REPOSITORY_NAME is the name of the helm repository.
+# This will only be used if LOCALDEV_LOCAL_BUILD=false or HELM_CHART_NAME is not a local chart (e.g. not in HELM_CHARTS array).
+HELM_REPOSITORY_NAME=""
+# HELM_REPOSITORY_URL is the url of the helm repository.
+HELM_REPOSITORY_URL=""
+# HELM_REPOSITORY_FORCE_UPDATE controls whether to always update helm repositories.
+# If false, "helm repo update" will only be called if the repo does not exist already.
+HELM_REPOSITORY_FORCE_UPDATE="false"
+# HELM_RELEASE_NAME is the name of the helm release. If not set, defaults to COMPONENT.
+HELM_RELEASE_NAME=""
+# HELM_RELEASE_NAMESPACE is the namespace for the helm release.
+HELM_RELEASE_NAMESPACE="default"
+# HELM_DELETE_ON_FAILURE controls whether to delete/rollback a failed install/upgrade.
+HELM_DELETE_ON_FAILURE="true"
+
+# COMPONENT_SKIP_DEPLOY controls whether to (conditionally) skip deployment of a component.
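+# For example (hypothetical component config), a component can opt out of its
+# deployment by overriding the flag in its own config.env, which this script
+# sources after these defaults:
+#
+#   # cluster/local/config/<component>/config.env
+#   COMPONENT_SKIP_DEPLOY="true"
+#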
+COMPONENT_SKIP_DEPLOY="false"
+
+MAIN_CONFIG_FILE="${DEPLOY_LOCAL_CONFIG_DIR}/config.env"
+COMPONENT_CONFIG_DIR="${DEPLOY_LOCAL_CONFIG_DIR}/${COMPONENT}"
+COMPONENT_CONFIG_FILE="${COMPONENT_CONFIG_DIR}/config.env"
+
+if [[ ! -d "${COMPONENT_CONFIG_DIR}" ]]; then
+  echo_error "Component config dir \"${COMPONENT_CONFIG_DIR}\" does not exist (or is not a directory), did you run make local.prepare?"
+fi
+
+if [[ -f "${MAIN_CONFIG_FILE}" ]]; then
+  source "${MAIN_CONFIG_FILE}"
+fi
+if [[ -f "${COMPONENT_CONFIG_FILE}" ]]; then
+  source "${COMPONENT_CONFIG_FILE}"
+fi
diff --git a/build/scripts/localdev-deploy-component.sh b/build/scripts/localdev-deploy-component.sh
new file mode 100755
index 0000000..86789f9
--- /dev/null
+++ b/build/scripts/localdev-deploy-component.sh
@@ -0,0 +1,165 @@
+#!/usr/bin/env bash
+set -aeuo pipefail
+
+COMPONENT=$1
+
+# Commands for searching a repo differ between helm2 and helm3:
+#   helm2 search -l
+#   helm3 search repo -l
+HELM_SEARCH_REPO="${HELM} search"
+if [ "${USE_HELM3}" == "true" ]; then
+  HELM_SEARCH_REPO="${HELM} search repo"
+fi
+
+# Source utility functions
+source "${SCRIPTS_DIR}/utils.sh"
+# sourcing load-configs.sh:
+# - initializes configuration variables with default values
+# - loads top level configuration
+# - loads component level configuration
+source "${SCRIPTS_DIR}/load-configs.sh" "${COMPONENT}"
+
+# Skip deployment of this component if COMPONENT_SKIP_DEPLOY is set to true
+if [ "${COMPONENT_SKIP_DEPLOY}" == "true" ]; then
+  echo_info "COMPONENT_SKIP_DEPLOY set to true, skipping deployment of ${COMPONENT}"
+  exit 0
+fi
+
+DEPLOY_SCRIPT="${DEPLOY_LOCAL_CONFIG_DIR}/${COMPONENT}/deploy.sh"
+
+# Run the deploy script, if it exists.
+# A deploy.sh script indicates that this is a "script-only" component: only that
+# script will be run for the component and no helm deployment will be made.
+if [ -f "${DEPLOY_SCRIPT}" ]; then
+  echo_info "Loading required images..."
+  # shellcheck disable=SC2068
+  for i in ${REQUIRED_IMAGES[@]+"${REQUIRED_IMAGES[@]}"}; do
+    pullAndLoadImage "${i}"
+  done
+  echo_info "Loading required images...OK"
+
+  echo_info "Running deploy script..."
+  source "${DEPLOY_SCRIPT}"
+  echo_info "Running deploy script...OK"
+  exit 0
+fi
+
+# if HELM_CHART_NAME is not set, default to the component name
+if [ -z "${HELM_CHART_NAME}" ]; then
+  HELM_CHART_NAME="${COMPONENT}"
+fi
+
+registries_arr=($BUILD_REGISTRIES)
+images_arr=($BUILD_IMAGES)
+image_archs_arr=($BUILD_IMAGE_ARCHS)
+charts_arr=($BUILD_HELM_CHARTS_LIST)
+
+post_render_args=""
+
+if [ "${LOCALDEV_LOCAL_BUILD}" == "true" ] && containsElement "${HELM_CHART_NAME}" ${charts_arr[@]+"${charts_arr[@]}"}; then
+  post_render_args="--post-renderer ${DEPLOY_LOCAL_POSTRENDER_WORKDIR}/exec"
+
+  # If local build is set and the helm chart is from this repository, use the locally built helm chart tgz file.
+  echo_info "Deploying locally built artifacts..."
+  HELM_CHART_VERSION=${BUILD_HELM_CHART_VERSION}
+  HELM_CHART_REF="${HELM_OUTPUT_DIR}/${COMPONENT}-${HELM_CHART_VERSION}.tgz"
+  [ -f "${HELM_CHART_REF}" ] || echo_error "Local chart ${HELM_CHART_REF} not found. Did you run \"make build\"?"
+
+  # If local build, tag the "required" local images so that they can be loaded into the kind cluster at a later step.
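+  # For example (hypothetical values): with BUILD_REGISTRY=build-1a2b3c4d,
+  # r=xpkg.upbound.io/crossplane, i=provider-confluent, a=amd64 and VERSION=v0.6.0,
+  # the loop below effectively runs:
+  #
+  #   docker tag build-1a2b3c4d/provider-confluent-amd64 xpkg.upbound.io/crossplane/provider-confluent:v0.6.0
+  #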
+  for r in ${registries_arr[@]+"${registries_arr[@]}"}; do
+    for i in ${images_arr[@]+"${images_arr[@]}"}; do
+      for a in ${image_archs_arr[@]+"${image_archs_arr[@]}"}; do
+        if containsElement "${r}/${i}" ${REQUIRED_IMAGES[@]+"${REQUIRED_IMAGES[@]}"}; then
+          echo_info "Tagging locally built image as ${r}/${i}:${VERSION}"
+          docker tag "${BUILD_REGISTRY}/${i}-${a}" "${r}/${i}:${VERSION}"
+        fi
+      done
+    done
+  done
+else
+  # If local build is NOT set or the helm chart is NOT from this repository, deploy the chart from a remote repository.
+  echo_info "Deploying artifacts in chart repo \"${HELM_REPOSITORY_NAME}\"..."
+  if [ -z ${HELM_REPOSITORY_NAME} ] || [ -z ${HELM_CHART_NAME} ]; then
+    echo_error "HELM_REPOSITORY_NAME and/or HELM_CHART_NAME is not set for component ${COMPONENT}!"
+  fi
+  HELM_CHART_REF="${HELM_REPOSITORY_NAME}/${HELM_CHART_NAME}"
+  # Add the helm repo and update repositories if the repo is not added already or force update is set.
+  if [ "${HELM_REPOSITORY_FORCE_UPDATE}" == "true" ] || ! "${HELM}" repo list -o yaml |grep -i "Name:\s*${HELM_REPOSITORY_NAME}\s*$" >/dev/null; then
+    "${HELM}" repo add "${HELM_REPOSITORY_NAME}" "${HELM_REPOSITORY_URL}"
+    "${HELM}" repo update
+  fi
+  if [ -z "${HELM_CHART_VERSION}" ]; then
+    # if no HELM_CHART_VERSION was provided, get the latest version from the repo; it is also used to load the required images for the chart.
+    [ "${LOCALDEV_PULL_LATEST}" == "true" ] && "${HELM}" repo update
+    HELM_CHART_VERSION=$(${HELM_SEARCH_REPO} -l ${HELM_CHART_REF} --devel |awk 'NR==2{print $2}')
+    echo_info "Latest version found in repo: ${HELM_CHART_VERSION}"
+  fi
+  if [ -z "${HELM_CHART_VERSION}" ]; then
+    echo_error "No version found in repo for chart ${HELM_CHART_REF}"
+  fi
+fi
+
+echo_info "Loading required images..."
+# shellcheck disable=SC2068
+for i in ${REQUIRED_IMAGES[@]+"${REQUIRED_IMAGES[@]}"}; do
+  # check if the image has a tag; if not, append the chart's tag
+  if ! echo "${i}" | grep -q ":"; then
+    i="${i}:v${HELM_CHART_VERSION}"
+  fi
+  # Pull the image:
+  # - if it has a "master" or "latest" tag
+  # - or if it does not already exist locally.
+  pullAndLoadImage "${i}"
+done
+echo_info "Loading required images...OK"
+
+PREDEPLOY_SCRIPT="${DEPLOY_LOCAL_CONFIG_DIR}/${COMPONENT}/pre-deploy.sh"
+POSTDEPLOY_SCRIPT="${DEPLOY_LOCAL_CONFIG_DIR}/${COMPONENT}/post-deploy.sh"
+
+# Run config.validate.sh, if it exists.
+test -f "${DEPLOY_LOCAL_CONFIG_DIR}/config.validate.sh" && source "${DEPLOY_LOCAL_CONFIG_DIR}/config.validate.sh"
+
+# Create the HELM_RELEASE_NAMESPACE if it does not exist already.
+createNamespace "${HELM_RELEASE_NAMESPACE}"
+
+# Run the pre-deploy script, if it exists.
+if [ -f "${PREDEPLOY_SCRIPT}" ]; then
+  echo_info "Running pre-deploy script..."
+  source "${PREDEPLOY_SCRIPT}"
+  echo_info "Running pre-deploy script...OK"
+fi
+
+# With all configuration sourced as environment variables, render the value-overrides.yaml file with gomplate.
+"${GOMPLATE}" -f "${DEPLOY_LOCAL_CONFIG_DIR}/${COMPONENT}/value-overrides.yaml.tmpl" \
+  -o "${DEPLOY_LOCAL_CONFIG_DIR}/${COMPONENT}/value-overrides.yaml"
+
+helm_chart_version_flag="--devel"
+if [ -n "${HELM_CHART_VERSION}" ]; then
+  helm_chart_version_flag="--version ${HELM_CHART_VERSION}"
+fi
+
+helm_wait_atomic_flag="--wait"
+if [ "${HELM_DELETE_ON_FAILURE}" == "true" ]; then
+  helm_wait_atomic_flag="--atomic"
+fi
+
+# if HELM_RELEASE_NAME is not set, default to the component name
+if [ -z "${HELM_RELEASE_NAME}" ]; then
+  HELM_RELEASE_NAME=${COMPONENT}
+fi
+
+echo_info "Running helm upgrade --install with computed parameters..."
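+# With the defaults computed above (release name defaults to the component,
+# namespace "default", --devel when no version is pinned, --atomic when
+# HELM_DELETE_ON_FAILURE=true, plus a post-renderer only for locally built
+# charts), the invocation below expands roughly to (illustrative):
+#
+#   helm upgrade --install <component> --namespace default --kubeconfig $KUBECONFIG \
+#     <chart-ref> --devel -f .../value-overrides.yaml --atomic
+#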
+# shellcheck disable=SC2086
+set -x
+"${HELM}" upgrade --install "${HELM_RELEASE_NAME}" --namespace "${HELM_RELEASE_NAMESPACE}" --kubeconfig "${KUBECONFIG}" \
+  "${HELM_CHART_REF}" ${helm_chart_version_flag:-} -f "${DEPLOY_LOCAL_CONFIG_DIR}/${COMPONENT}/value-overrides.yaml" \
+  ${post_render_args:-} ${helm_wait_atomic_flag:-}
+{ set +x; } 2>/dev/null
+echo_info "Running helm upgrade --install with computed parameters...OK"
+
+# Run the post-deploy script, if it exists.
+if [ -f "${POSTDEPLOY_SCRIPT}" ]; then
+  echo_info "Running post-deploy script..."
+  source "${POSTDEPLOY_SCRIPT}"
+  echo_info "Running post-deploy script...OK"
+fi
\ No newline at end of file
diff --git a/build/scripts/localdev-prepare.sh b/build/scripts/localdev-prepare.sh
new file mode 100755
index 0000000..5c57847
--- /dev/null
+++ b/build/scripts/localdev-prepare.sh
@@ -0,0 +1,135 @@
+#!/usr/bin/env bash
+set -aeuo pipefail
+
+# Source utility functions
+source "${SCRIPTS_DIR}/utils.sh"
+
+getRepo() {
+  repo_config=$1
+
+  IFS='@' read -ra repo_and_branch <<< "${repo_config}"
+  repo="${repo_and_branch[0]}"
+  branch="${repo_and_branch[1]:-}"
+  [[ -z "${branch}" ]] && branch="master"
+
+  repo_dir=$(basename "${repo}" .git)
+  repo_cache_dir="${LOCALDEV_WORKDIR_REPOS}/${repo_dir}"
+
+  if ! [ -d "${repo_cache_dir}" ]; then
+    echo_info "Cloning branch \"${branch}\" of repo \"${repo}\"..."
+    repo_url="git@github.com:${repo}.git"
+    if [ "${LOCALDEV_CLONE_WITH}" == "https" ]; then
+      repo_url="https://github.com/${repo}.git"
+    fi
+    git clone --depth 1 "${repo_url}" "${repo_cache_dir}" -b "${branch}"
+    echo_info "Cloning branch \"${branch}\" of repo \"${repo}\"...OK"
+  elif [ "${LOCALDEV_PULL_LATEST}" == "true" ]; then
+    echo_info "Getting latest branch \"${branch}\" of repo \"${repo}\"..."
+    git -C "${repo_cache_dir}" stash > /dev/null
+    git -C "${repo_cache_dir}" fetch origin
+    if ! output=$(git -C "${repo_cache_dir}" reset --hard origin/"${branch}" 2>&1); then
+      echo_error "${output}"
+    fi
+
+    echo_info "Getting latest branch \"${branch}\" of repo \"${repo}\"...OK"
+  fi
+}
+
+###
+
+# prepare local dev configuration under ".work/local" by gathering configuration from different repositories.
+LOCALDEV_WORKDIR=${WORK_DIR}/local
+mkdir -p "${LOCALDEV_WORKDIR}"
+LOCALDEV_WORKDIR_REPOS=${LOCALDEV_WORKDIR}/repos
+
+if [ -z "${LOCALDEV_INTEGRATION_CONFIG_REPO}" ]; then
+  echo_info "No integration config repo configured, using local config"
+  mkdir -p "${DEPLOY_LOCAL_WORKDIR}"
+  cp -rf "${DEPLOY_LOCAL_DIR}/." "${DEPLOY_LOCAL_WORKDIR}"
+else
+  echo_info "Using integration config from repo@branch \"${LOCALDEV_INTEGRATION_CONFIG_REPO}\""
+  getRepo "${LOCALDEV_INTEGRATION_CONFIG_REPO}"
+  mkdir -p "${DEPLOY_LOCAL_WORKDIR}"
+  cp -rf "${repo_cache_dir}/." "${DEPLOY_LOCAL_WORKDIR}"
+fi
+
+if [ -n "${LOCAL_DEV_REPOS}" ]; then
+  repositories_arr=($LOCAL_DEV_REPOS)
+  for i in ${repositories_arr[@]+"${repositories_arr[@]}"}; do
+
+    local_repo=$(basename $(git config --get remote.origin.url) .git)
+    base_repo=$(basename "${i}" .git)
+
+    if [ "${LOCALDEV_LOCAL_BUILD}" == "true" ] && [ "${base_repo}" == "${local_repo}" ]; then
+      # if it is a local build and the repo is the local one, just use the local config
+
+      echo_info "Using local config for repo \"${base_repo}\""
+      repo_dir="${ROOT_DIR}"
+    else
+      getRepo "${i}"
+      repo_dir=${LOCALDEV_WORKDIR_REPOS}/${base_repo}
+    fi
+
+    # copy local dev config under workdir
+    local_config_dir="${repo_dir}/cluster/local/config"
+    if [ -d "${local_config_dir}" ]; then
+      cp -rf "${local_config_dir}/." "${DEPLOY_LOCAL_WORKDIR}/config"
"${DEPLOY_LOCAL_WORKDIR}/config" + else + echo_warn "No local dev config found for repo \"${base_repo}\"" + fi + done +fi + +# prepare post-render workdir +mkdir -p "${DEPLOY_LOCAL_POSTRENDER_WORKDIR}" + +localdev_postrender_kustomization="${DEPLOY_LOCAL_POSTRENDER_WORKDIR}/kustomization.yaml" +cat << EOF > "${localdev_postrender_kustomization}" +resources: + - in.yaml +patches: + - path: patch-deployment.yaml + target: + kind: Deployment + name: ".*" + - path: patch-rollout.yaml + target: + kind: Rollout + name: ".*" +EOF + +localdev_postrender_patch_deployment="${DEPLOY_LOCAL_POSTRENDER_WORKDIR}/patch-deployment.yaml" +cat << EOF > "${localdev_postrender_patch_deployment}" +apiVersion: apps/v1 +kind: Deployment +metadata: + name: any +spec: + template: + metadata: + annotations: + rollme: "$RANDOM" +EOF + +localdev_postrender_patch_rollout="${DEPLOY_LOCAL_POSTRENDER_WORKDIR}/patch-rollout.yaml" +cat << EOF > "${localdev_postrender_patch_rollout}" +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: any +spec: + template: + metadata: + annotations: + rollme: "$RANDOM" +EOF + +LOCALDEV_POSTRENDER_EXEC="${DEPLOY_LOCAL_POSTRENDER_WORKDIR}/exec" +cat << EOF > "${LOCALDEV_POSTRENDER_EXEC}" +#!/bin/bash + +cat <&0 > ${DEPLOY_LOCAL_POSTRENDER_WORKDIR}/in.yaml + +${KUSTOMIZE} build ${DEPLOY_LOCAL_POSTRENDER_WORKDIR} +EOF +chmod +x "${LOCALDEV_POSTRENDER_EXEC}" diff --git a/build/scripts/localdev-remove-component.sh b/build/scripts/localdev-remove-component.sh new file mode 100755 index 0000000..e32b301 --- /dev/null +++ b/build/scripts/localdev-remove-component.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash +set -aeuo pipefail + +COMPONENT=$1 + +source "${SCRIPTS_DIR}/utils.sh" +source "${SCRIPTS_DIR}/load-configs.sh" "${COMPONENT}" + +DEPLOY_SCRIPT="${DEPLOY_LOCAL_CONFIG_DIR}/${COMPONENT}/deploy.sh" + +# Run deploy script, if exists. +# If there is a deploy.sh script, which indicates this is a "script-only" component, removing not supported. +if [ -f "${DEPLOY_SCRIPT}" ]; then + echo_warn "${COMPONENT} is a \"script-only\" component, local.remove not supported!" + exit 0 +fi + + +if [ -z "${HELM_RELEASE_NAME}" ]; then + HELM_RELEASE_NAME=${COMPONENT} +fi + +helm_purge_flag="--purge" +if [ "${USE_HELM3}" == "true" ]; then + HELM="${HELM3}" + XDG_DATA_HOME="${HELM_HOME}" + XDG_CONFIG_HOME="${HELM_HOME}" + XDG_CACHE_HOME="${HELM_HOME}" + helm_purge_flag="" +fi + +echo_info "Running helm delete..." +set -x +"${HELM}" delete "${HELM_RELEASE_NAME}" -n "${HELM_RELEASE_NAMESPACE}" --kubeconfig "${KUBECONFIG}" ${helm_purge_flag} +{ set +x; } 2>/dev/null +echo_info "Running helm delete...OK!" 
\ No newline at end of file
diff --git a/build/scripts/localdev-scaffold.sh b/build/scripts/localdev-scaffold.sh
new file mode 100755
index 0000000..f0c3d7a
--- /dev/null
+++ b/build/scripts/localdev-scaffold.sh
@@ -0,0 +1,87 @@
+#!/usr/bin/env bash
+set -aeuo pipefail
+
+deploy_local_root=${ROOT_DIR}/cluster/local
+
+local_config_root=${deploy_local_root}
+test -d ${local_config_root}/config && { echo "Directory \"${local_config_root}/config\" already exists!"; exit 1; }
+
+charts_arr=($BUILD_HELM_CHARTS_LIST)
+default_component="${charts_arr[0]}"
+read -p "Enter a name for the component to deploy [${default_component}]: " component
+component=${component:-"${default_component}"}
+
+local_config_dir=${local_config_root}/config
+
+echo "creating directory ${local_config_root}"
+mkdir -p "${local_config_root}"
+echo "creating directory ${local_config_dir}/${component}"
+mkdir -p "${local_config_dir}/${component}"
+
+echo "initializing file ${local_config_root}/kind.yaml"
+cat << EOF > ${local_config_root}/kind.yaml
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+EOF
+
+echo "initializing file ${local_config_dir}/config.env"
+cat << EOF > ${local_config_dir}/config.env
+IMAGE_CROSSPLANE="crossplane/crossplane"
+
+echo "replace this with top level config"
+PARAM_FROM_TOP_LEVEL_CONFIG="top-level-config"
+EOF
+
+echo "initializing file ${local_config_dir}/config.validate.sh"
+cat << EOF > ${local_config_dir}/config.validate.sh
+echo "replace this with top level config validation script"
+EOF
+
+echo "initializing file ${local_config_dir}/${component}/config.env"
+cat << EOF > ${local_config_dir}/${component}/config.env
+REQUIRED_IMAGES+=("\${IMAGE_CROSSPLANE}")
+
+#HELM_CHART_NAME=""
+#HELM_CHART_VERSION=""
+#HELM_REPOSITORY_NAME=""
+#HELM_REPOSITORY_URL=""
+#HELM_REPOSITORY_FORCE_UPDATE="false"
+#HELM_RELEASE_NAME=""
+#HELM_RELEASE_NAMESPACE="default"
+
+echo "replace this with component config"
+PARAM_FROM_COMPONENT_CONFIG="component-config"
+EOF
+
+echo "initializing file ${local_config_dir}/${component}/pre-deploy.sh"
+cat << EOF > ${local_config_dir}/${component}/pre-deploy.sh
+# remove this file if the component does not need pre-deploy steps.
+echo "running pre-deploy script..."
+EOF
+
+echo "initializing file ${local_config_dir}/${component}/post-deploy.sh"
+cat << EOF > ${local_config_dir}/${component}/post-deploy.sh
+# remove this file if the component does not need post-deploy steps.
+echo "running post-deploy script..."
+EOF
+
+echo "initializing file ${local_config_dir}/${component}/.gitignore"
+cat << EOF > ${local_config_dir}/${component}/.gitignore
+value-overrides.yaml
+EOF
+
+echo "initializing file ${local_config_dir}/${component}/value-overrides.yaml.tmpl"
+cat << EOF > ${local_config_dir}/${component}/value-overrides.yaml.tmpl
+image:
+  pullPolicy: Never
+
+paramFromTopLevel: {{ .Env.PARAM_FROM_TOP_LEVEL_CONFIG }}
+paramFromComponent: {{ .Env.PARAM_FROM_COMPONENT_CONFIG }}
+EOF
+
+echo "done!"
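+# At this point the scaffolded tree looks like this (derived from the steps above):
+#
+#   cluster/local/kind.yaml
+#   cluster/local/config/config.env
+#   cluster/local/config/config.validate.sh
+#   cluster/local/config/<component>/config.env
+#   cluster/local/config/<component>/pre-deploy.sh
+#   cluster/local/config/<component>/post-deploy.sh
+#   cluster/local/config/<component>/.gitignore
+#   cluster/local/config/<component>/value-overrides.yaml.tmpl
+#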
+
+echo """
+Run the following command to deploy the locally built component (or consider adding it as a makefile target):
+  DEPLOY_LOCAL_DIR=${local_config_root} LOCALDEV_LOCAL_BUILD=true make local.up local.deploy.${component}
+"""
diff --git a/build/scripts/utils.sh b/build/scripts/utils.sh
new file mode 100644
index 0000000..39b436d
--- /dev/null
+++ b/build/scripts/utils.sh
@@ -0,0 +1,61 @@
+################################# setup colors
+# setting up colors
+BOLD='\033[10;1m'
+YLW='\033[0;33m'
+GRN='\033[0;32m'
+RED='\033[0;31m'
+NOC='\033[0m' # No Color
+
+echo_info(){
+  msg="${1}"
+  [[ -n ${COMPONENT:-} ]] && msg="[${COMPONENT:-}] ${msg}"
+  printf "${BOLD}%s${NOC}\n" "${msg}"
+}
+
+echo_success(){
+  msg="${1}"
+  [[ -n ${COMPONENT:-} ]] && msg="[${COMPONENT:-}] ${msg}"
+  printf "${GRN}%s${NOC}\n" "${msg}"
+}
+
+echo_warn(){
+  msg="${1}"
+  [[ -n ${COMPONENT:-} ]] && msg="[${COMPONENT:-}] ${msg}"
+  printf "${YLW}%s${NOC}\n" "${msg}"
+}
+
+echo_error(){
+  msg="${1}"
+  [[ -n ${COMPONENT:-} ]] && msg="[${COMPONENT:-}] ${msg}"
+  printf "\n${RED}%s${NOC}\n" "${msg}"
+  return 1
+}
+
+#################################
+
+containsElement () {
+  local e match="$1"
+  shift
+  for e; do [[ "$e" == "$match" ]] && return 0; done
+  return 1
+}
+
+pullAndLoadImage () {
+  i=$1
+  # Pull the image:
+  # - if it has a "master" or "latest" tag
+  # - or if it does not already exist locally.
+  if echo "${i}" | grep ":master\s*$" >/dev/null || echo "${i}" | grep ":latest\s*$" >/dev/null || \
+    ! docker inspect --type=image "${i}" >/dev/null 2>&1; then
+    docker pull "${i}"
+  fi
+  "${KIND}" load docker-image "${i}" --name="${KIND_CLUSTER_NAME}"
+  return 0
+}
+
+createNamespace () {
+  n=$1
+  # Create the namespace if it does not exist already
+  "${KUBECTL}" --kubeconfig "${KUBECONFIG}" get ns "${n}" >/dev/null 2>&1 || \
+    ${KUBECTL} --kubeconfig "${KUBECONFIG}" create ns "${n}"
+}
\ No newline at end of file
diff --git a/cluster/images/provider-confluent/Dockerfile b/cluster/images/provider-confluent/Dockerfile
new file mode 100644
index 0000000..61193f6
--- /dev/null
+++ b/cluster/images/provider-confluent/Dockerfile
@@ -0,0 +1,51 @@
+FROM alpine:3.17.1
+RUN apk --no-cache add ca-certificates bash
+
+ARG TARGETOS
+ARG TARGETARCH
+
+ADD "bin/${TARGETOS}_${TARGETARCH}/provider" /usr/local/bin/provider
+
+ENV USER_ID=65532
+
+# Setup Terraform environment
+
+## Provider-dependent configuration
+ARG TERRAFORM_VERSION
+ARG TERRAFORM_PROVIDER_SOURCE
+ARG TERRAFORM_PROVIDER_VERSION
+ARG TERRAFORM_PROVIDER_DOWNLOAD_NAME
+ARG TERRAFORM_NATIVE_PROVIDER_BINARY
+ARG TERRAFORM_PROVIDER_DOWNLOAD_URL_PREFIX
+
+## End of - Provider-dependent configuration
+
+ENV PLUGIN_DIR /terraform/provider-mirror/registry.terraform.io/${TERRAFORM_PROVIDER_SOURCE}/${TERRAFORM_PROVIDER_VERSION}/${TARGETOS}_${TARGETARCH}
+ENV TF_CLI_CONFIG_FILE /terraform/.terraformrc
+ENV TF_FORK 0
+
+RUN mkdir -p ${PLUGIN_DIR}
+
+ADD https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_${TARGETOS}_${TARGETARCH}.zip /tmp
+ADD ${TERRAFORM_PROVIDER_DOWNLOAD_URL_PREFIX}/${TERRAFORM_PROVIDER_DOWNLOAD_NAME}_${TERRAFORM_PROVIDER_VERSION}_${TARGETOS}_${TARGETARCH}.zip /tmp
+ADD terraformrc.hcl ${TF_CLI_CONFIG_FILE}
+
+RUN unzip /tmp/terraform_${TERRAFORM_VERSION}_${TARGETOS}_${TARGETARCH}.zip -d /usr/local/bin \
+  && chmod +x /usr/local/bin/terraform \
+  && rm /tmp/terraform_${TERRAFORM_VERSION}_${TARGETOS}_${TARGETARCH}.zip \
+  && unzip /tmp/${TERRAFORM_PROVIDER_DOWNLOAD_NAME}_${TERRAFORM_PROVIDER_VERSION}_${TARGETOS}_${TARGETARCH}.zip -d ${PLUGIN_DIR} \
+  && chmod +x ${PLUGIN_DIR}/* \
+  && rm /tmp/${TERRAFORM_PROVIDER_DOWNLOAD_NAME}_${TERRAFORM_PROVIDER_VERSION}_${TARGETOS}_${TARGETARCH}.zip \
+  && chown -R ${USER_ID}:${USER_ID} /terraform
+# End of - Setup Terraform environment
+
+# Provider controller needs these environment variables at runtime
+ENV TERRAFORM_VERSION ${TERRAFORM_VERSION}
+ENV TERRAFORM_PROVIDER_SOURCE ${TERRAFORM_PROVIDER_SOURCE}
+ENV TERRAFORM_PROVIDER_VERSION ${TERRAFORM_PROVIDER_VERSION}
+ENV TERRAFORM_NATIVE_PROVIDER_PATH ${PLUGIN_DIR}/${TERRAFORM_NATIVE_PROVIDER_BINARY}
+
+USER ${USER_ID}
+EXPOSE 8080
+
+ENTRYPOINT ["provider"]
diff --git a/cluster/images/provider-confluent/Makefile b/cluster/images/provider-confluent/Makefile
new file mode 100755
index 0000000..083bec1
--- /dev/null
+++ b/cluster/images/provider-confluent/Makefile
@@ -0,0 +1,42 @@
+# ====================================================================================
+# Setup Project
+
+include ../../../build/makelib/common.mk
+
+# ====================================================================================
+# Options
+
+include ../../../build/makelib/imagelight.mk
+
+# ====================================================================================
+# Targets
+
+img.build:
+	@$(INFO) docker build $(IMAGE)
+	@$(MAKE) BUILD_ARGS="--load" img.build.shared
+	@$(OK) docker build $(IMAGE)
+
+img.publish:
+	@$(INFO) Skipping image publish for $(IMAGE)
+	@echo Publish is deferred to xpkg machinery
+	@$(OK) Image publish skipped for $(IMAGE)
+
+img.build.shared:
+	@cp Dockerfile $(IMAGE_TEMP_DIR) || $(FAIL)
+	@cp terraformrc.hcl $(IMAGE_TEMP_DIR) || $(FAIL)
+	@cp -r $(OUTPUT_DIR)/bin/ $(IMAGE_TEMP_DIR)/bin || $(FAIL)
+	@docker buildx build $(BUILD_ARGS) \
+		--platform $(IMAGE_PLATFORMS) \
+		--build-arg TERRAFORM_VERSION=$(TERRAFORM_VERSION) \
+		--build-arg TERRAFORM_PROVIDER_SOURCE=$(TERRAFORM_PROVIDER_SOURCE) \
+		--build-arg TERRAFORM_PROVIDER_VERSION=$(TERRAFORM_PROVIDER_VERSION) \
+		--build-arg TERRAFORM_PROVIDER_DOWNLOAD_NAME=$(TERRAFORM_PROVIDER_DOWNLOAD_NAME) \
+		--build-arg TERRAFORM_PROVIDER_DOWNLOAD_URL_PREFIX=$(TERRAFORM_PROVIDER_DOWNLOAD_URL_PREFIX) \
+		--build-arg TERRAFORM_NATIVE_PROVIDER_BINARY=$(TERRAFORM_NATIVE_PROVIDER_BINARY) \
+		-t $(IMAGE) \
+		$(IMAGE_TEMP_DIR) || $(FAIL)
+
+img.promote:
+	@$(INFO) Skipping image promotion from $(FROM_IMAGE) to $(TO_IMAGE)
+	@echo Promote is deferred to xpkg machinery
+	@$(OK) Image promotion skipped for $(FROM_IMAGE) to $(TO_IMAGE)
diff --git a/cluster/images/provider-confluent/terraformrc.hcl b/cluster/images/provider-confluent/terraformrc.hcl
new file mode 100644
index 0000000..022203c
--- /dev/null
+++ b/cluster/images/provider-confluent/terraformrc.hcl
@@ -0,0 +1,9 @@
+provider_installation {
+  filesystem_mirror {
+    path    = "/terraform/provider-mirror"
+    include = ["*/*"]
+  }
+  direct {
+    exclude = ["*/*"]
+  }
+}
diff --git a/cluster/local/integration-tests.sh b/cluster/local/integration-tests.sh
new file mode 100644
index 0000000..b893573
--- /dev/null
+++ b/cluster/local/integration-tests.sh
@@ -0,0 +1,196 @@
+#!/usr/bin/env bash
+set -e
+
+# setting up colors
+BLU='\033[0;34m'
+YLW='\033[0;33m'
+GRN='\033[0;32m'
+RED='\033[0;31m'
+NOC='\033[0m' # No Color
+echo_info(){
+    printf "\n${BLU}%s${NOC}" "$1"
+}
+echo_step(){
+    printf "\n${BLU}>>>>>>> %s${NOC}\n" "$1"
+}
+echo_sub_step(){
+    printf "\n${BLU}>>> %s${NOC}\n" "$1"
+}
+
+echo_step_completed(){
+    printf "${GRN} [✔]${NOC}"
+}
+
+echo_success(){
+    printf "\n${GRN}%s${NOC}\n" "$1"
+}
+echo_warn(){
+    printf "\n${YLW}%s${NOC}" "$1"
+}
+echo_error(){ + printf "\n${RED}%s${NOC}" "$1" + exit 1 +} + +# ------------------------------ +projectdir="$( cd "$( dirname "${BASH_SOURCE[0]}")"/../.. && pwd )" + +# get the build environment variables from the special build.vars target in the main makefile +eval $(make --no-print-directory -C ${projectdir} build.vars) + +# ------------------------------ + +SAFEHOSTARCH="${SAFEHOSTARCH:-amd64}" +CONTROLLER_IMAGE="${BUILD_REGISTRY}/${PROJECT_NAME}-${SAFEHOSTARCH}" + +# tag as latest version to load into kind cluster +K8S_CLUSTER="${K8S_CLUSTER:-${BUILD_REGISTRY}-inttests}" + +CROSSPLANE_NAMESPACE="crossplane-system" +PACKAGE_NAME="provider-gitlab" + +# cleanup on exit +if [ "$skipcleanup" != true ]; then + function cleanup { + echo_step "Cleaning up..." + export KUBECONFIG= + "${KIND}" delete cluster --name="${K8S_CLUSTER}" + } + + trap cleanup EXIT +fi + +# setup package cache +echo_step "setting up local package cache" +CACHE_PATH="${projectdir}/.work/inttest-package-cache" +mkdir -p "${CACHE_PATH}" +echo "created cache dir at ${CACHE_PATH}" +"${UP}" alpha xpkg xp-extract --from-xpkg "${OUTPUT_DIR}"/xpkg/linux_"${SAFEHOSTARCH}"/"${PACKAGE_NAME}"-"${VERSION}".xpkg -o "${CACHE_PATH}/${PACKAGE_NAME}.gz" && chmod 644 "${CACHE_PATH}/${PACKAGE_NAME}.gz" + +# create kind cluster with extra mounts +KIND_NODE_IMAGE="kindest/node:${KIND_NODE_IMAGE_TAG}" +echo_step "creating k8s cluster using kind ${KIND_VERSION} and node image ${KIND_NODE_IMAGE}" +KIND_CONFIG="$( cat < "PAUSED") and resuming ("PAUSED" -> "RUNNING") a connector is supported via an update operation. + environment: '(Required Configuration Block) supports the following:' + id: '- (Required String) The ID of the Environment that the connector belongs to, for example, env-abc123.' + kafka_cluster: '(Optional Configuration Block) supports the following:' + name: '- (Required String) The configuration setting name, for example, connector.class.' + value: '- (Required String) The configuration setting value, for example, S3_SINK.' + importStatements: + - |- + $ export CONFLUENT_CLOUD_API_KEY="" + $ export CONFLUENT_CLOUD_API_SECRET="" + $ terraform import confluent_connector.my_connector "env-abc123/lkc-abc123/S3_SINKConnector_0" + confluent_environment Resource - terraform-provider-confluent: + subCategory: "" + name: confluent_environment Resource - terraform-provider-confluent + title: confluent_environment Resource - terraform-provider-confluent + argumentDocs: + display_name: '- (Required String) A human-readable name for the Environment. Start and end the name with alphanumeric characters, for example, "Development". The name can contain hyphens and underscores.' + id: '- (Required String) The ID of the Environment, for example, env-abc123.' + resource_name: '- (Required String) The Confluent Resource Name of the Environment, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123.' + importStatements: + - |- + $ export CONFLUENT_CLOUD_API_KEY="" + $ export CONFLUENT_CLOUD_API_SECRET="" + $ terraform import confluent_environment.my_env env-abc123 + confluent_identity_pool Resource - terraform-provider-confluent: + subCategory: "" + name: confluent_identity_pool Resource - terraform-provider-confluent + title: confluent_identity_pool Resource - terraform-provider-confluent + argumentDocs: + description: '- (Required String) A description for the Identity Pool.' + display_name: '- (Required String) A human-readable name for the Identity Pool.' 
+ filter: '- (Required String) A filter expression in Supported Common Expression Language (CEL) that specifies which identities can authenticate using your identity pool (see Set identity pool filters for more details).' + id: '- (Required String) The ID of the Identity Provider associated with the Identity Pool, for example, op-abc123.' + identity_claim: '- (Required String) The JSON Web Token (JWT) claim to extract the authenticating identity to Confluent resources from (see Registered Claim Names for more details). This appears in the audit log records, showing, for example, that "identity Z used identity pool X to access topic A".' + identity_provider: '(Required Configuration Block) supports the following:' + importStatements: + - |- + $ export CONFLUENT_CLOUD_API_KEY="" + $ export CONFLUENT_CLOUD_API_SECRET="" + $ terraform import confluent_identity_pool.example op-abc123/pool-xyz456 + confluent_identity_provider Resource - terraform-provider-confluent: + subCategory: "" + name: confluent_identity_provider Resource - terraform-provider-confluent + title: confluent_identity_provider Resource - terraform-provider-confluent + argumentDocs: + description: '- (Required String) A description for the Identity Provider.' + display_name: '- (Required String) A human-readable name for the Identity Provider.' + id: '- (Required String) The ID of the Identity Provider, for example, op-abc123.' + issuer: '- (Required String) A publicly reachable issuer URI for the Identity Provider. The unique issuer URI string represents the entity for issuing tokens.' + jwks_uri: '- (Required String) A publicly reachable JSON Web Key Set (JWKS) URI for the Identity Provider. A JSON Web Key Set (JWKS) provides a set of keys containing the public keys used to verify any JSON Web Token (JWT) issued by your OAuth 2.0 identity provider.' + importStatements: + - |- + $ export CONFLUENT_CLOUD_API_KEY="" + $ export CONFLUENT_CLOUD_API_SECRET="" + $ terraform import confluent_identity_provider.example op-abc123 + confluent_kafka_acl Resource - terraform-provider-confluent: + subCategory: "" + name: confluent_kafka_acl Resource - terraform-provider-confluent + title: confluent_kafka_acl Resource - terraform-provider-confluent + argumentDocs: + credentials: '(Optional Configuration Block) supports the following:' + host: '- (Required String) The host for the ACL. Should be set to * for Confluent Cloud.' + id: '- (Required String) The ID of the Kafka cluster, for example, lkc-abc123.' + kafka_cluster: '- (Optional Configuration Block) supports the following:' + key: '- (Required String) The Kafka API Key.' + operation: '- (Required String) The operation type for the ACL. Accepted values are: UNKNOWN, ANY, ALL, READ, WRITE, CREATE, DELETE, ALTER, DESCRIBE, CLUSTER_ACTION, DESCRIBE_CONFIGS, ALTER_CONFIGS, and IDEMPOTENT_WRITE. See Authorization using ACLs to find mappings of (resource_type, operation) to one or more Kafka APIs or request types.' + pattern_type: '- (Required String) The pattern type for the ACL. Accepted values are: LITERAL and PREFIXED.' + permission: '- (Required String) The permission for the ACL. Accepted values are: UNKNOWN, ANY, DENY, and ALLOW.' + principal: '- (Required String) The principal for the ACL.' + resource_name: '- (Required String) The resource name for the ACL. Must be kafka-cluster if resource_type equals to CLUSTER.' + resource_type: '- (Required String) The type of the resource. Accepted values are: UNKNOWN, ANY, TOPIC, GROUP, CLUSTER, TRANSACTIONAL_ID, DELEGATION_TOKEN. 
See Authorization using ACLs to find definitions of resource types and mappings of (resource_type, operation) to one or more Kafka APIs or request types.' + rest_endpoint: '- (Optional String) The REST endpoint of the Kafka cluster, for example, https://pkc-00000.us-central1.gcp.confluent.cloud:443.' + secret: '- (Required String, Sensitive) The Kafka API Secret.' + importStatements: + - |- + # Option #1: Manage multiple Kafka clusters in the same Terraform workspace + $ export CONFLUENT_CLOUD_API_KEY="" + $ export CONFLUENT_CLOUD_API_SECRET="" + $ export IMPORT_KAFKA_API_KEY="" + $ export IMPORT_KAFKA_API_SECRET="" + $ export IMPORT_KAFKA_REST_ENDPOINT="" + $ terraform import confluent_kafka_acl.describe-cluster "lkc-12345/CLUSTER#kafka-cluster#LITERAL#User:sa-xyz123#*#DESCRIBE#ALLOW" + + # Option #2: Manage a single Kafka cluster in the same Terraform workspace + $ export CONFLUENT_CLOUD_API_KEY="" + $ export CONFLUENT_CLOUD_API_SECRET="" + $ terraform import confluent_kafka_acl.describe-cluster "lkc-12345/CLUSTER#kafka-cluster#LITERAL#User:sa-xyz123#*#DESCRIBE#ALLOW" + confluent_kafka_client_quota Resource - terraform-provider-confluent: + subCategory: "" + name: confluent_kafka_client_quota Resource - terraform-provider-confluent + title: confluent_kafka_client_quota Resource - terraform-provider-confluent + argumentDocs: + description: '- (Optional String) The description of the Kafka Client Quota.' + display_name: '- (Required String) The name of the Kafka Client Quota.' + egress_byte_rate: '- (Optional String) The egress throughput limit in bytes per second.' + environment: '(Required Configuration Block) supports the following:' + id: '- (Required String) The ID of the Kafka Cluster where the Kafka Client Quota is applied, for example, lkc-abc123.' + ingress_byte_rate: '- (Optional String) The ingress throughput limit in bytes per second.' + kafka_cluster: '(Required Configuration Block) supports the following:' + principals: '- (Required Set of Strings) The list of principals (i.e., service accounts or identity pools) to apply the Kafka Client Quota to. Use the special name, "", to represent the default quota for all users and service accounts.' + throughput: '(Required Configuration Block) supports the following:' + importStatements: + - |- + $ export CONFLUENT_CLOUD_API_KEY="" + $ export CONFLUENT_CLOUD_API_SECRET="" + $ terraform import confluent_kafka_client_quota.example cq-abc123 + confluent_kafka_cluster Resource - terraform-provider-confluent: + subCategory: "" + name: confluent_kafka_cluster Resource - terraform-provider-confluent + title: confluent_kafka_cluster Resource - terraform-provider-confluent + argumentDocs: + api_version: '- (Required String) An API Version of the schema version of the Kafka cluster, for example, cmk/v2.' + availability: '- (Required String) The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE and MULTI_ZONE.' + basic: '- (Optional Configuration Block) The configuration of the Basic Kafka cluster.' + basic.environment: '(Required Configuration Block) supports the following:' + basic.network: '(Optional Configuration Block) supports the following:' + bootstrap_endpoint: '- (Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster. (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).' + cku: '- (Required Number) The number of Confluent Kafka Units (CKUs) for Dedicated cluster types. 
The minimum number of CKUs for SINGLE_ZONE dedicated clusters is 1 whereas MULTI_ZONE dedicated clusters must have more than 2 CKUs.' + cloud: '- (Required String) The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.' + dedicated: '- (Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:' + display_name: '- (Required String) The name of the Kafka cluster.' + encryption_key: '- (Optional String) The ID of the encryption key that is used to encrypt the data in the Kafka cluster, for example, arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab (key Amazon Resource Name) for AWS or projects/my-test-project/locations/global/keyRings/test-byok/cryptoKeys/test for GCP. Append required permissions to the key policy before creating a Kafka cluster, see Encrypt Confluent Cloud Clusters using Self-Managed Keys for more details. At the moment, self-managed encryption keys are only available for the Dedicated clusters on AWS or GCP.' + id: '- (Required String) The ID of the Environment that the Kafka cluster belongs to, for example, env-abc123.' + kind: '- (Required String) A kind of the Kafka cluster, for example, Cluster.' + rbac_crn: '- (Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.' + region: '- (Required String) The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.' + rest_endpoint: '- (Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).' + standard: '- (Optional Configuration Block) The configuration of the Standard Kafka cluster.' + importStatements: + - |- + $ export CONFLUENT_CLOUD_API_KEY="" + $ export CONFLUENT_CLOUD_API_SECRET="" + $ terraform import confluent_kafka_cluster.my_kafka env-abc123/lkc-abc123 + confluent_kafka_cluster_config Resource - terraform-provider-confluent: + subCategory: "" + name: confluent_kafka_cluster_config Resource - terraform-provider-confluent + title: confluent_kafka_cluster_config Resource - terraform-provider-confluent + argumentDocs: + credentials: '(Optional Configuration Block) supports the following:' + credentials.config: '- (Optional Map) The custom cluster settings to set:' + id: '- (Required String) The ID of the Dedicated Kafka cluster, for example, lkc-abc123.' + kafka_cluster: '- (Optional Configuration Block) supports the following:' + key: '- (Required String) The Kafka API Key.' + name: '- (Required String) The setting name, for example, auto.create.topics.enable.' + rest_endpoint: '- (Optional String) The REST endpoint of the Dedicated Kafka cluster, for example, https://pkc-00000.us-central1.gcp.confluent.cloud:443).' + secret: '- (Required String, Sensitive) The Kafka API Secret.' + value: '- (Required String) The setting value, for example, true.' 
+ importStatements: + - |- + # Option #1: Manage multiple Kafka clusters in the same Terraform workspace + $ export IMPORT_KAFKA_API_KEY="" + $ export IMPORT_KAFKA_API_SECRET="" + $ export IMPORT_KAFKA_REST_ENDPOINT="" + $ terraform import confluent_kafka_cluster_config.test lkc-abc123 + + # Option #2: Manage a single Kafka cluster in the same Terraform workspace + $ terraform import confluent_kafka_cluster_config.test lkc-abc123 + confluent_kafka_mirror_topic Resource - terraform-provider-confluent: + subCategory: "" + name: confluent_kafka_mirror_topic Resource - terraform-provider-confluent + title: confluent_kafka_mirror_topic Resource - terraform-provider-confluent + argumentDocs: + cluster_link: '- (Required Configuration Block) supports the following:' + credentials: '(Optional Configuration Block) supports the following:' + id: '- (Required String) The ID of the destination Kafka cluster, for example, lkc-abc123.' + kafka_cluster: '- (Required Configuration Block) supports the following:' + key: '- (Required String) The Kafka API Key.' + link_name: '- (Required String) The name of the cluster link to attach to the mirror topic, for example, my-cluster-link.' + mirror_topic_name: '- (Optional String) The name of the mirror topic. Only required when there is a prefix configured on the cluster link. For example, when is configured for the cluster link, the mirror topic name has to be of the format .' + rest_endpoint: '- (Optional String) The REST endpoint of the destination Kafka cluster, for example, https://pkc-00000.us-central1.gcp.confluent.cloud:443).' + secret: '- (Required String, Sensitive) The Kafka API Secret.' + source_kafka_topic: '- (Required Configuration Block) supports the following:' + status: (Optional String) The status of the mirror topic. The supported values are "ACTIVE", "PAUSED", "PROMOTED", "FAILED_OVER". Pausing ("ACTIVE" -> "PAUSED"), resuming ("PAUSED" -> "ACTIVE"), promoting, and failing over a mirror topic is supported via an update operation. Defaults to "ACTIVE". + topic_name: '- (Required String) The name of the topic on the source cluster to be mirrored over the cluster link, for example, orders. A topic with the exact same name must exist on the source cluster, and no topic with this name should exist on the destination cluster.' + importStatements: + - |- + $ export IMPORT_KAFKA_API_KEY="" + $ export IMPORT_KAFKA_API_SECRET="" + $ export IMPORT_KAFKA_REST_ENDPOINT="" + $ terraform import confluent_kafka_mirror_topic.my_mirror_topic lkc-abc123/my-cluster-link/orders-123 + confluent_kafka_topic Resource - terraform-provider-confluent: + subCategory: "" + name: confluent_kafka_topic Resource - terraform-provider-confluent + title: confluent_kafka_topic Resource - terraform-provider-confluent + argumentDocs: + credentials: '(Optional Configuration Block) supports the following:' + credentials.config: '- (Optional Map) The custom topic settings to set:' + credentials.partitions_count: '- (Optional Number) The number of partitions to create in the topic. Defaults to 6.' + id: '- (Required String) The ID of the Kafka cluster, for example, lkc-abc123.' + kafka_cluster: '- (Optional Configuration Block) supports the following:' + key: '- (Required String) The Kafka API Key.' + name: '- (Required String) The setting name, for example, cleanup.policy.' + rest_endpoint: '- (Optional String) The REST endpoint of the Kafka cluster, for example, https://pkc-00000.us-central1.gcp.confluent.cloud:443).' 
+ secret: '- (Required String, Sensitive) The Kafka API Secret.' + topic_name: '- (Required String) The name of the topic, for example, orders-1. The topic name can be up to 249 characters in length, and can include the following characters: a-z, A-Z, 0-9, . (dot), _ (underscore), and - (dash). As a best practice, we recommend against using any personally identifiable information (PII) when naming your topic.' + value: '- (Required String) The setting value, for example, compact.' + importStatements: + - |- + # Option #1: Manage multiple Kafka clusters in the same Terraform workspace + $ export IMPORT_KAFKA_API_KEY="" + $ export IMPORT_KAFKA_API_SECRET="" + $ export IMPORT_KAFKA_REST_ENDPOINT="" + $ terraform import confluent_kafka_topic.my_topic lkc-abc123/orders-123 + + # Option #2: Manage a single Kafka cluster in the same Terraform workspace + $ terraform import confluent_kafka_topic.my_topic lkc-abc123/orders-123 + confluent_ksql_cluster Resource - terraform-provider-confluent: + subCategory: "" + name: confluent_ksql_cluster Resource - terraform-provider-confluent + title: confluent_ksql_cluster Resource - terraform-provider-confluent + argumentDocs: + api_version: '- (Required String) An API Version of the schema version of the ksqlDB cluster, for example, ksqldbcm/v2.' + credential_identity: '(Required Configuration Block) supports the following:' + csu: '- (Required Number) The number of Confluent Streaming Units (CSUs) for the ksqlDB cluster.' + display_name: '- (Required String) The name of the ksqlDB cluster.' + environment: '(Required Configuration Block) supports the following:' + id: '- (Required String) The ID of the associated Environment, for example, env-xyz456.' + kafka_cluster: '(Required Configuration Block) supports the following:' + kind: '- (Required String) A kind of the ksqlDB cluster, for example, Cluster.' + resource_name: '- (Required String) The Confluent Resource Name of the ksqlDB cluster.' + rest_endpoint: '- (Required String) The API endpoint of the ksqlDB cluster, for example, https://pksqlc-00000.us-central1.gcp.glb.confluent.cloud.' + storage: '- (Required Integer) The amount of storage (in GB) provisioned to the ksqlDB cluster.' + topic_prefix: '- (Required String) Topic name prefix used by this ksqlDB cluster. Used to assign ACLs for this ksqlDB cluster to use, for example, pksqlc-00000.' + use_detailed_processing_log: (Optional Boolean) Controls whether the row data should be included in the processing log topic. Set it to false if you don't want to emit sensitive information to the processing log. Defaults to true. + importStatements: + - |- + $ export CONFLUENT_CLOUD_API_KEY="" + $ export CONFLUENT_CLOUD_API_SECRET="" + $ terraform import confluent_ksql_cluster.example env-abc123/lksqlc-abc123 + confluent_network Resource - terraform-provider-confluent: + subCategory: "" + name: confluent_network Resource - terraform-provider-confluent + title: confluent_network Resource - terraform-provider-confluent + argumentDocs: + account: '- (Required String) The AWS account ID associated with the Confluent Cloud VPC.' + aws: '- (Optional Configuration Block) The AWS-specific network details if available. It supports the following:' + azure: '- (Optional Configuration Block) The Azure-specific network details if available. It supports the following:' + cidr: '- (Required String) The IPv4 CIDR block to used for the network. Must be /16. Required for VPC peering and AWS TransitGateway.' 
+ cloud: '- (Required String) The cloud service provider in which the network exists. Accepted values are: AWS, AZURE, and GCP.' + connection_types: '- (Required List of String) The list of connection types that may be used with the network. Accepted connection types are: PEERING, TRANSITGATEWAY, and PRIVATELINK.' + display_name: '- (Required String) The name of the Network.' + dns_config: '(Optional Configuration Block) Network DNS config. It applies only to the PRIVATELINK network connection type. It supports the following:' + dns_domain: '- (Optional String) The root DNS domain for the network, for example, pr123a.us-east-2.aws.confluent.cloud if applicable. Present on Networks that support Private Link.' + environment: '(Required Configuration Block) supports the following:' + gcp: '- (Optional Configuration Block) The GCP-specific network details if available. It supports the following:' + id: '- (Required String) The ID of the Environment that the Network belongs to, for example, env-abc123.' + private_link_endpoint_service: '- (Optional String) The endpoint service of the Confluent Cloud VPC (used for PrivateLink) if available.' + private_link_service_aliases: '- (Optional Map) The mapping of zones to Private Link Service Aliases if available. Keys are zone names, for example, 1 and values are Azure Private Link Service Aliases, for example, s-nk99e-privatelink-1.8c43dcd0-695c-1234-bc35-11fe6abb303a.centralus.azure.privatelinkservice.' + private_service_connect_service_attachments: '- (Optional Map) The mapping of zones to Private Service Connect Service Attachments if available. Keys are zones and values are GCP Private Service Connect service attachment.' + project: '- (Required String) The GCP Project ID associated with the Confluent Cloud VPC.' + region: '- (Required String) The cloud provider region where the network exists.' + resolution: |- + - (Required String) Network DNS resolution. + When resolution is CHASED_PRIVATE, clusters in this network require both public and private DNS to resolve cluster endpoints. + When resolution is PRIVATE, clusters in this network only require private DNS to resolve cluster endpoints. + resource_name: '- (Required String) The Confluent Resource Name of the Network.' + vpc: '- (Required String) The Confluent Cloud VPC ID.' + vpc_network: '- (Required String) The network name of the Confluent Cloud VPC.' + zonal_subdomains: '- (Optional Map) The DNS subdomain for each zone. Present on networks that support Private Link. Keys are zone names, for example, use2-az1 and values are DNS domains, for example, use2-az1.pr123a.us-east-2.aws.confluent.cloud.' + zones: |- + - (Optional List of String) The 3 availability zones for this network. They can optionally be specified for AWS networks + used with PrivateLink, for GCP networks used with Private Service Connect, and for AWS and GCP + networks used with Peering. Otherwise, they are automatically chosen by Confluent Cloud. + On AWS, zones are AWS AZ IDs, for example, use1-az3. + On GCP, zones are GCP zones, for example, us-central1-c. + On Azure, zones are Confluent-chosen names (for example, 1, 2, 3) since Azure does not have universal zone identifiers. 
+ importStatements: + - |- + $ export CONFLUENT_CLOUD_API_KEY="" + $ export CONFLUENT_CLOUD_API_SECRET="" + $ terraform import confluent_network.my_network env-abc123/n-abc123 + confluent_peering Resource - terraform-provider-confluent: + subCategory: "" + name: confluent_peering Resource - terraform-provider-confluent + title: confluent_peering Resource - terraform-provider-confluent + argumentDocs: + account: '- (Required String) The AWS Account ID of the peer VPC owner. You can find your AWS Account ID here under My Account section of the AWS Management Console. Must be a 12 character string.' + aws: '- (Optional Configuration Block) The AWS-specific Peering details if available. It supports the following:' + azure: '- (Optional Configuration Block) The Azure-specific Peering details if available. It supports the following:' + customer_region: '- (Required String) The region of the AWS peer VPC.' + display_name: '- (Optional String) The name of the Peering.' + environment: '(Required Configuration Block) supports the following:' + gcp: '- (Optional Configuration Block) The GCP-specific Peering details if available. It supports the following:' + id: '- (Required String) The ID of the Environment that the Peering belongs to, for example, env-abc123.' + import_custom_routes: '- (Optional Boolean) The Import Custom Routes option enables connectivity to a Confluent Cloud cluster in Google Cloud from customer premise or other clouds, such as AWS and Azure, through a customer VPC that is peered with Confluent Cloud in the same region. Defaults to false. Learn more about considerations / limitations of the Import Custom Routes option here.' + network: '(Required Configuration Block) supports the following:' + project: '- (Required String) The GCP Project ID. You can find your Google Cloud Project ID under Project ID section of your Google Cloud Console dashboard.' + routes: '- (Required List of String) The AWS VPC CIDR blocks or subsets. This must be from the supported CIDR blocks and must not overlap with your Confluent Cloud CIDR block or any other network peering connection VPC CIDR (learn more about the requirements here). You can find AWS VPC CIDR here under Your VPCs -> Target VPC -> Details section of the AWS Management Console.' + tenant: '- (Required String) The Tenant ID that represents an organization in Azure Active Directory. You can find your Azure Tenant ID in the Azure Portal under Azure Active Directory. Must be a valid 32 character UUID string.' + vnet: '- (Required String) The resource (composite) ID of the peer Virtual Network that you''re peering with Confluent Cloud, in the format /subscriptions//resourceGroups//providers/Microsoft.Network/virtualNetworks/. You can find Subscription ID, Resource Group Name and your VNet name under Virtual Networks -> Target VNet -> Essentials section of your Microsoft Azure Portal.' + vpc: '- (Required String) The AWS VPC ID of the peer VPC that you''re peering with Confluent Cloud. You can find your AWS VPC ID here under Your VPCs section of the AWS Management Console. Must start with vpc-.' + vpc_network: '- (Required String) The VPC network name that you''re peering to Confluent Cloud. You can find your VPC network name under VPC Networks section of your Google Cloud Console.'
+ importStatements: + - |- + $ export CONFLUENT_CLOUD_API_KEY="" + $ export CONFLUENT_CLOUD_API_SECRET="" + $ terraform import confluent_peering.my_peer env-abc123/peer-abc123 + confluent_private_link_access Resource - terraform-provider-confluent: + subCategory: "" + name: confluent_private_link_access Resource - terraform-provider-confluent + title: confluent_private_link_access Resource - terraform-provider-confluent + argumentDocs: + account: '- (Required String) The AWS account ID to enable for the Private Link Access. You can find your AWS account ID [here](https://console.aws.amazon.com/billing/home?#/account) under My Account in your AWS Management Console. Must be a 12 character string.' + aws: '- (Optional Configuration Block) The AWS-specific Private Link Access details if available. It supports the following:' + azure: '- (Optional Configuration Block) The Azure-specific Private Link Access details if available. It supports the following:' + display_name: '- (Optional String) The name of the Private Link Access.' + environment: '(Required Configuration Block) supports the following:' + gcp: '- (Optional Configuration Block) The GCP-specific Private Service Connect details if available. It supports the following:' + id: '- (Required String) The ID of the Environment that the Private Link Access belongs to, for example, env-abc123.' + network: '(Required Configuration Block) supports the following:' + project: '- (Required String) The GCP project ID to allow for Private Service Connect access. You can find your Google Cloud Project ID under Project ID section of your Google Cloud Console dashboard.' + subscription: '- (Required String) The Azure subscription ID to enable for the Private Link Access. You can find your Azure subscription ID in the subscription section of your [Microsoft Azure Portal](https://portal.azure.com/#blade/Microsoft_Azure_Billing/SubscriptionsBlade). Must be a valid 32 character UUID string.' + importStatements: + - |- + $ export CONFLUENT_CLOUD_API_KEY="" + $ export CONFLUENT_CLOUD_API_SECRET="" + $ terraform import confluent_private_link_access.my_pla env-abc123/pla-abc123 + confluent_role_binding Resource - terraform-provider-confluent: + subCategory: "" + name: confluent_role_binding Resource - terraform-provider-confluent + title: confluent_role_binding Resource - terraform-provider-confluent + argumentDocs: + crn_pattern: '- (Required String) A Confluent Resource Name (CRN) that specifies the scope and resource patterns necessary for the role to bind.' + id: '- (Required String) The ID of the Role Binding (e.g., rb-f3a90de).' + principal: '- (Required String) A principal User to bind the role to, for example, "User:u-111aaa" for binding to a user "u-111aaa", or "User:sa-111aaa" for binding to a service account "sa-111aaa".' + role_name: '- (Required String) A name of the role to bind to the principal. See Confluent Cloud RBAC Roles for a full list of supported role names.' + importStatements: + - |- + $ export CONFLUENT_CLOUD_API_KEY="" + $ export CONFLUENT_CLOUD_API_SECRET="" + $ terraform import confluent_role_binding.my_rb rb-f3a90de + confluent_schema Resource - terraform-provider-confluent: + subCategory: "" + name: confluent_schema Resource - terraform-provider-confluent + title: confluent_schema Resource - terraform-provider-confluent + argumentDocs: + credentials: '(Optional Configuration Block) supports the following:' + credentials.format: '- (Required String) The format of the schema. Accepted values are: AVRO, PROTOBUF, and JSON.'
+ credentials.hard_delete: '- (Optional Boolean) An optional flag to control whether a schema should be soft or hard deleted. Set it to true if you want to hard delete a schema on destroy (see Schema Deletion Guidelines for more details). Must be unset when importing. Defaults to false (soft delete).' + credentials.recreate_on_update: '- (Optional Boolean) An optional flag to control whether a schema should be recreated on an update. Set it to true if you want to manage different schema versions using different resource instances. Must be set to the target value when importing. Defaults to false (resource instance always points to the latest schema by supporting in-place updates).' + credentials.schema: '- (Required String) The schema string, for example, file("./schema_version_1.avsc").' + credentials.schema_reference: '- (Optional List) The list of referenced schemas (see Schema References for more details):' + credentials.subject_name: '- (Required String) The name of the subject (in other words, the namespace), representing the subject under which the schema will be registered, for example, test-subject. Schemas evolve safely, following a compatibility mode defined, under a subject name.' + id: '- (Required String) The ID of the Schema Registry cluster, for example, lsrc-abc123.' + key: '- (Required String) The Schema Registry API Key.' + name: '- (Required String) The name of the subject, representing the subject under which the referenced schema is registered.' + rest_endpoint: '- (Optional String) The REST endpoint of the Schema Registry cluster, for example, https://psrc-00000.us-central1.gcp.confluent.cloud:443.' + schema_identifier: '- (Required Integer) The globally unique ID of the Schema, for example, 100003. If the same schema is registered under a different subject, the same identifier will be returned. However, the version of the schema may be different under different subjects.' + schema_registry_cluster: '- (Optional Configuration Block) supports the following:' + secret: '- (Required String, Sensitive) The Schema Registry API Secret.' + subject_name: '- (Required String) The name for the reference. (For Avro Schema, the reference name is the fully qualified schema name, for JSON Schema it is a URL, and for Protobuf Schema, it is the name of another Protobuf file.)' + version: '- (Required Integer) The version, representing the exact version of the schema under the registered subject.' + importStatements: + - |- + # Option A: recreate_on_update = false (by default) + $ export IMPORT_SCHEMA_REGISTRY_API_KEY="" + $ export IMPORT_SCHEMA_REGISTRY_API_SECRET="" + $ export IMPORT_SCHEMA_REGISTRY_REST_ENDPOINT="" + $ terraform import confluent_schema.my_schema_1 lsrc-abc123/test-subject/latest + + # Option B: recreate_on_update = true + $ export IMPORT_SCHEMA_REGISTRY_API_KEY="" + $ export IMPORT_SCHEMA_REGISTRY_API_SECRET="" + $ export IMPORT_SCHEMA_REGISTRY_REST_ENDPOINT="" + $ terraform import confluent_schema.my_schema_1 lsrc-abc123/test-subject/100003 + confluent_schema_registry_cluster Resource - terraform-provider-confluent: + subCategory: "" + name: confluent_schema_registry_cluster Resource - terraform-provider-confluent + title: confluent_schema_registry_cluster Resource - terraform-provider-confluent + argumentDocs: + api_version: '- (Required String) An API Version of the schema version of the Schema Registry cluster, for example, srcm/v2.' + display_name: '- (Required String) The name of the Schema Registry cluster, for example, Stream Governance Package.'
+ environment: '(Required Configuration Block) supports the following:' + id: '- (Required String) The ID of the Environment that the Schema Registry cluster belongs to, for example, env-abc123.' + kind: '- (Required String) A kind of the Schema Registry cluster, for example, Cluster.' + package: '- (Required String) The type of the billing package. Accepted values are: ESSENTIALS and ADVANCED.' + region: '(Required Configuration Block) supports the following:' + resource_name: '- (Required String) The Confluent Resource Name of the Schema Registry cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/schema-registry=lsrc-abc123.' + rest_endpoint: '- (Required String) The HTTP endpoint of the Schema Registry cluster, for example, https://psrc-00000.us-west-2.aws.confluent.cloud.' + importStatements: + - |- + $ export CONFLUENT_CLOUD_API_KEY="" + $ export CONFLUENT_CLOUD_API_SECRET="" + $ terraform import confluent_schema_registry_cluster.example env-abc123/lsrc-abc123 + confluent_schema_registry_cluster_config Resource - terraform-provider-confluent: + subCategory: "" + name: confluent_schema_registry_cluster_config Resource - terraform-provider-confluent + title: confluent_schema_registry_cluster_config Resource - terraform-provider-confluent + argumentDocs: + credentials: '(Optional Configuration Block) supports the following:' + credentials.compatibility_level: '- (Optional String) The global Schema Registry compatibility level. Accepted values are: BACKWARD, BACKWARD_TRANSITIVE, FORWARD, FORWARD_TRANSITIVE, FULL, FULL_TRANSITIVE, and NONE. See the Compatibility Types for more details.' + id: '- (Required String) The ID of the Schema Registry cluster, for example, lsrc-abc123.' + key: '- (Required String) The Schema Registry API Key.' + rest_endpoint: '- (Optional String) The REST endpoint of the Schema Registry cluster, for example, https://psrc-00000.us-central1.gcp.confluent.cloud:443.' + schema_registry_cluster: '- (Optional Configuration Block) supports the following:' + secret: '- (Required String, Sensitive) The Schema Registry API Secret.' + importStatements: + - |- + $ export IMPORT_SCHEMA_REGISTRY_API_KEY="" + $ export IMPORT_SCHEMA_REGISTRY_API_SECRET="" + $ export IMPORT_SCHEMA_REGISTRY_REST_ENDPOINT="" + $ terraform import confluent_schema_registry_cluster_config.example lsrc-abc123 + confluent_schema_registry_cluster_mode Resource - terraform-provider-confluent: + subCategory: "" + name: confluent_schema_registry_cluster_mode Resource - terraform-provider-confluent + title: confluent_schema_registry_cluster_mode Resource - terraform-provider-confluent + argumentDocs: + credentials: '(Optional Configuration Block) supports the following:' + credentials.mode: '- (Optional String) The global Schema Registry mode. Accepted values are: READWRITE, READONLY, READONLY_OVERRIDE, and IMPORT.' + id: '- (Required String) The ID of the Schema Registry cluster, for example, lsrc-abc123.' + key: '- (Required String) The Schema Registry API Key.' + rest_endpoint: '- (Optional String) The REST endpoint of the Schema Registry cluster, for example, https://psrc-00000.us-central1.gcp.confluent.cloud:443.' + schema_registry_cluster: '- (Optional Configuration Block) supports the following:' + secret: '- (Required String, Sensitive) The Schema Registry API Secret.'
+ importStatements: + - |- + $ export IMPORT_SCHEMA_REGISTRY_API_KEY="" + $ export IMPORT_SCHEMA_REGISTRY_API_SECRET="" + $ export IMPORT_SCHEMA_REGISTRY_REST_ENDPOINT="" + $ terraform import confluent_schema_registry_cluster_mode.example lsrc-abc123 + confluent_service_account Resource - terraform-provider-confluent: + subCategory: "" + name: confluent_service_account Resource - terraform-provider-confluent + title: confluent_service_account Resource - terraform-provider-confluent + argumentDocs: + api_version: '- (Required String) An API Version of the schema version of the Service Account, for example, iam/v2.' + description: '- (Optional String) A free-form description of the Service Account.' + display_name: '- (Required String) A human-readable name for the Service Account.' + id: '- (Required String) The ID of the Service Account (e.g., sa-abc123).' + kind: '- (Required String) A kind of the Service Account, for example, ServiceAccount.' + importStatements: + - |- + $ export CONFLUENT_CLOUD_API_KEY="" + $ export CONFLUENT_CLOUD_API_SECRET="" + $ terraform import confluent_service_account.my_sa sa-abc123 + confluent_subject_config Resource - terraform-provider-confluent: + subCategory: "" + name: confluent_subject_config Resource - terraform-provider-confluent + title: confluent_subject_config Resource - terraform-provider-confluent + argumentDocs: + credentials: '(Optional Configuration Block) supports the following:' + credentials.compatibility_level: '- (Optional String) The Compatibility Level of the specified subject. Accepted values are: BACKWARD, BACKWARD_TRANSITIVE, FORWARD, FORWARD_TRANSITIVE, FULL, FULL_TRANSITIVE, and NONE. See the Compatibility Types for more details.' + credentials.subject_name: '- (Required String) The name of the subject (in other words, the namespace), representing the subject under which the schema will be registered, for example, test-subject.' + id: '- (Required String) The ID of the Schema Registry cluster, for example, lsrc-abc123.' + key: '- (Required String) The Schema Registry API Key.' + rest_endpoint: '- (Optional String) The REST endpoint of the Schema Registry cluster, for example, https://psrc-00000.us-central1.gcp.confluent.cloud:443.' + schema_registry_cluster: '- (Optional Configuration Block) supports the following:' + secret: '- (Required String, Sensitive) The Schema Registry API Secret.' + importStatements: + - |- + $ export IMPORT_SCHEMA_REGISTRY_API_KEY="" + $ export IMPORT_SCHEMA_REGISTRY_API_SECRET="" + $ export IMPORT_SCHEMA_REGISTRY_REST_ENDPOINT="" + $ terraform import confluent_subject_config.example lsrc-abc123/test-subject + confluent_subject_mode Resource - terraform-provider-confluent: + subCategory: "" + name: confluent_subject_mode Resource - terraform-provider-confluent + title: confluent_subject_mode Resource - terraform-provider-confluent + argumentDocs: + credentials: '(Optional Configuration Block) supports the following:' + credentials.mode: '- (Optional String) The mode of the specified subject. Accepted values are: READWRITE, READONLY, READONLY_OVERRIDE, and IMPORT.' + credentials.subject_name: '- (Required String) The name of the subject (in other words, the namespace), representing the subject under which the schema will be registered, for example, test-subject.' + id: '- (Required String) The ID of the Schema Registry cluster, for example, lsrc-abc123.' + key: '- (Required String) The Schema Registry API Key.'
+ rest_endpoint: '- (Optional String) The REST endpoint of the Schema Registry cluster, for example, https://psrc-00000.us-central1.gcp.confluent.cloud:443.' + schema_registry_cluster: '- (Optional Configuration Block) supports the following:' + secret: '- (Required String, Sensitive) The Schema Registry API Secret.' + importStatements: + - |- + $ export IMPORT_SCHEMA_REGISTRY_API_KEY="" + $ export IMPORT_SCHEMA_REGISTRY_API_SECRET="" + $ export IMPORT_SCHEMA_REGISTRY_REST_ENDPOINT="" + $ terraform import confluent_subject_mode.example lsrc-abc123/test-subject + confluent_transit_gateway_attachment Resource - terraform-provider-confluent: + subCategory: "" + name: confluent_transit_gateway_attachment Resource - terraform-provider-confluent + title: confluent_transit_gateway_attachment Resource - terraform-provider-confluent + argumentDocs: + aws: '- (Required Configuration Block) The AWS-specific Transit Gateway Attachment details. It supports the following:' + display_name: '- (Optional String) The name of the Transit Gateway Attachment.' + environment: '(Required Configuration Block) supports the following:' + id: '- (Required String) The ID of the Environment that the Transit Gateway Attachment belongs to, for example, env-abc123.' + network: '(Required Configuration Block) supports the following:' + ram_resource_share_arn: '- (Required String) The Amazon Resource Name (ARN) of the Resource Access Manager (RAM) Resource Share of the transit gateway your Confluent Cloud network attaches to.' + routes: '- (Required List of String) List of destination routes for traffic from Confluent VPC to customer VPC via Transit Gateway.' + transit_gateway_attachment_id: '- (Required String) The ID of the AWS Transit Gateway VPC Attachment that attaches Confluent VPC to Transit Gateway.' + transit_gateway_id: '- (Required String) The ID of the AWS Transit Gateway that you want Confluent Cloud to be attached to. Must start with tgw-.' + importStatements: + - |- + $ export CONFLUENT_CLOUD_API_KEY="" + $ export CONFLUENT_CLOUD_API_SECRET="" + $ terraform import confluent_transit_gateway_attachment.my_tgwa env-abc123/tgwa-abc123 diff --git a/config/provider.go b/config/provider.go new file mode 100644 index 0000000..70b7cc8 --- /dev/null +++ b/config/provider.go @@ -0,0 +1,43 @@ +/* +Copyright 2021 Upbound Inc.
+*/ + +package config + +import ( + // Note(turkenh): we are importing this to embed provider schema document + _ "embed" + + ujconfig "github.com/upbound/upjet/pkg/config" +) + +const ( + resourcePrefix = "confluent" + modulePath = "github.com/crossplane-contrib/provider-confluent" +) + +//go:embed schema.json +var providerSchema string + +//go:embed provider-metadata.yaml +var providerMetadata string + +// GetProvider returns provider configuration +func GetProvider() *ujconfig.Provider { + pc := ujconfig.NewProvider([]byte(providerSchema), resourcePrefix, modulePath, []byte(providerMetadata), + ujconfig.WithRootGroup("crossplane.io"), + ujconfig.WithIncludeList(ExternalNameConfigured()), + ujconfig.WithFeaturesPackage("internal/features"), + ujconfig.WithDefaultResourceOptions( + ExternalNameConfigurations(), + )) + + for _, configure := range []func(provider *ujconfig.Provider){ + // add custom config functions + } { + configure(pc) + } + + pc.ConfigureResources() + return pc +} diff --git a/config/schema.json b/config/schema.json new file mode 100644 index 0000000..bffde45 --- /dev/null +++ b/config/schema.json @@ -0,0 +1 @@ +{"format_version":"1.0","provider_schemas":{"registry.terraform.io/confluentinc/confluent":{"provider":{"version":0,"block":{"attributes":{"cloud_api_key":{"type":"string","description":"The Confluent Cloud API Key.","description_kind":"markdown","optional":true,"sensitive":true},"cloud_api_secret":{"type":"string","description":"The Confluent Cloud API Secret.","description_kind":"markdown","optional":true,"sensitive":true},"endpoint":{"type":"string","description":"The base endpoint of Confluent Cloud API. Defaults to `https://api.confluent.cloud`.","description_kind":"markdown","optional":true},"kafka_api_key":{"type":"string","description":"The Kafka Cluster API Key.","description_kind":"markdown","optional":true,"sensitive":true},"kafka_api_secret":{"type":"string","description":"The Kafka Cluster API Secret.","description_kind":"markdown","optional":true,"sensitive":true},"kafka_id":{"type":"string","description":"The Kafka Cluster ID.","description_kind":"markdown","optional":true},"kafka_rest_endpoint":{"type":"string","description":"The Kafka Cluster REST Endpoint.","description_kind":"markdown","optional":true},"max_retries":{"type":"number","description":"Maximum number of retries of HTTP client. 
Defaults to 4.","description_kind":"markdown","optional":true},"schema_registry_api_key":{"type":"string","description":"The Schema Registry Cluster API Key.","description_kind":"markdown","optional":true,"sensitive":true},"schema_registry_api_secret":{"type":"string","description":"The Schema Registry Cluster API Secret.","description_kind":"markdown","optional":true,"sensitive":true},"schema_registry_id":{"type":"string","description":"The Schema Registry Cluster ID.","description_kind":"markdown","optional":true},"schema_registry_rest_endpoint":{"type":"string","description":"The Schema Registry Cluster REST Endpoint.","description_kind":"markdown","optional":true}},"description_kind":"plain"}},"resource_schemas":{"confluent_api_key":{"version":0,"block":{"attributes":{"description":{"type":"string","description":"A free-form description of the API key.","description_kind":"markdown","optional":true},"disable_wait_for_ready":{"type":"bool","description":"Defaults to `false`.","description_kind":"markdown","optional":true},"display_name":{"type":"string","description":"A human-readable name for the API key.","description_kind":"markdown","optional":true},"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"secret":{"type":"string","description":"The API Key Secret.","description_kind":"markdown","computed":true,"sensitive":true}},"block_types":{"managed_resource":{"nesting_mode":"list","block":{"attributes":{"api_version":{"type":"string","description":"The API version of the referred owner.","description_kind":"markdown","required":true},"id":{"type":"string","description":"The unique identifier for the referred resource.","description_kind":"markdown","required":true},"kind":{"type":"string","description":"The kind of the referred resource.","description_kind":"markdown","required":true}},"block_types":{"environment":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The unique identifier for the environment.","description_kind":"markdown","required":true}},"description":"Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.","description_kind":"markdown"},"min_items":1,"max_items":1}},"description":"The resource associated with this object. The only resource that is supported is 'cmk.v2.Cluster', 'srcm.v2.Cluster'.","description_kind":"markdown"},"max_items":1},"owner":{"nesting_mode":"list","block":{"attributes":{"api_version":{"type":"string","description":"The API version of the referred owner.","description_kind":"markdown","required":true},"id":{"type":"string","description":"The unique identifier for the referred owner.","description_kind":"markdown","required":true},"kind":{"type":"string","description":"The kind of the referred owner.","description_kind":"markdown","required":true}},"description":"The owner to which the API Key belongs. The owner can be one of 'iam.v2.User', 'iam.v2.ServiceAccount'.","description_kind":"markdown"},"min_items":1,"max_items":1}},"description_kind":"plain"}},"confluent_cluster_link":{"version":0,"block":{"attributes":{"config":{"type":["map","string"],"description":"The custom cluster link settings to set (e.g., `\"acl.sync.ms\" = \"5100\"`).","description_kind":"markdown","optional":true,"computed":true},"connection_mode":{"type":"string","description":"The connection mode of the Cluster Link. 
Defaults to `OUTBOUND`.","description_kind":"markdown","optional":true},"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"link_mode":{"type":"string","description":"The mode of the Cluster Link. Defaults to `DESTINATION`.","description_kind":"markdown","optional":true},"link_name":{"type":"string","description":"The name of the Cluster Link.","description_kind":"markdown","required":true}},"block_types":{"destination_kafka_cluster":{"nesting_mode":"list","block":{"attributes":{"bootstrap_endpoint":{"type":"string","description":"The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster. (e.g., `SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092` or `pkc-00000.us-central1.gcp.confluent.cloud:9092`).","description_kind":"markdown","optional":true},"id":{"type":"string","description":"The unique identifier for the referred Kafka cluster.","description_kind":"markdown","required":true},"rest_endpoint":{"type":"string","description":"The REST endpoint of the Kafka cluster (e.g., `https://pkc-00000.us-central1.gcp.confluent.cloud:443`).","description_kind":"markdown","optional":true}},"block_types":{"credentials":{"nesting_mode":"list","block":{"attributes":{"key":{"type":"string","description":"The Kafka API Key for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true},"secret":{"type":"string","description":"The Kafka API Secret for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true}},"description":"The Kafka API Credentials.","description_kind":"markdown"},"max_items":1}},"description_kind":"plain"},"min_items":1,"max_items":1},"source_kafka_cluster":{"nesting_mode":"list","block":{"attributes":{"bootstrap_endpoint":{"type":"string","description":"The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster. (e.g., `SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092` or `pkc-00000.us-central1.gcp.confluent.cloud:9092`).","description_kind":"markdown","optional":true},"id":{"type":"string","description":"The unique identifier for the referred Kafka cluster.","description_kind":"markdown","required":true},"rest_endpoint":{"type":"string","description":"The REST endpoint of the Kafka cluster (e.g., `https://pkc-00000.us-central1.gcp.confluent.cloud:443`).","description_kind":"markdown","optional":true}},"block_types":{"credentials":{"nesting_mode":"list","block":{"attributes":{"key":{"type":"string","description":"The Kafka API Key for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true},"secret":{"type":"string","description":"The Kafka API Secret for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true}},"description":"The Kafka API Credentials.","description_kind":"markdown"},"max_items":1}},"description_kind":"plain"},"min_items":1,"max_items":1}},"description_kind":"plain"}},"confluent_connector":{"version":0,"block":{"attributes":{"config_nonsensitive":{"type":["map","string"],"description":"The nonsensitive configuration settings to set (e.g., `\"time.interval\" = \"DAILY\"`).","description_kind":"markdown","required":true},"config_sensitive":{"type":["map","string"],"description":"The sensitive configuration settings to set (e.g., `\"gcs.credentials.config\" = \"**REDACTED***\"`).
Should not be set for an import operation.","description_kind":"markdown","optional":true,"computed":true,"sensitive":true},"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"status":{"type":"string","description_kind":"plain","optional":true,"computed":true}},"block_types":{"environment":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The unique identifier for the environment.","description_kind":"markdown","required":true}},"description":"Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.","description_kind":"markdown"},"min_items":1,"max_items":1},"kafka_cluster":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The Kafka cluster ID (e.g., `lkc-12345`).","description_kind":"markdown","required":true}},"description_kind":"plain"},"min_items":1,"max_items":1},"timeouts":{"nesting_mode":"single","block":{"attributes":{"create":{"type":"string","description_kind":"plain","optional":true}},"description_kind":"plain"}}},"description_kind":"plain"}},"confluent_environment":{"version":0,"block":{"attributes":{"display_name":{"type":"string","description":"A human-readable name for the Environment.","description_kind":"markdown","required":true},"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"resource_name":{"type":"string","description":"The Confluent Resource Name of the Environment.","description_kind":"markdown","computed":true}},"description_kind":"plain"}},"confluent_identity_pool":{"version":0,"block":{"attributes":{"description":{"type":"string","description":"A description of the Identity Pool.","description_kind":"markdown","required":true},"display_name":{"type":"string","description":"A name for the Identity Pool.","description_kind":"markdown","required":true},"filter":{"type":"string","description":"A filter expression that must be evaluated to be true to use this identity pool.","description_kind":"markdown","required":true},"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"identity_claim":{"type":"string","description":"A JWT claim to extract the authenticating principal to Confluent resources.","description_kind":"markdown","required":true}},"block_types":{"identity_provider":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The unique identifier for the Identity Provider.","description_kind":"markdown","required":true}},"description":"Identity Provider objects represent external OAuth/OpenID Connect providers within Confluent Cloud.","description_kind":"markdown"},"min_items":1,"max_items":1}},"description_kind":"plain"}},"confluent_identity_provider":{"version":0,"block":{"attributes":{"description":{"type":"string","description":"A description of the Identity Provider.","description_kind":"markdown","required":true},"display_name":{"type":"string","description":"A name for the Identity Provider.","description_kind":"markdown","required":true},"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"issuer":{"type":"string","description":"A publicly reachable issuer URI for the Identity Provider.","description_kind":"markdown","required":true},"jwks_uri":{"type":"string","description":"A publicly reachable JWKS URI for the Identity 
Provider.","description_kind":"markdown","required":true}},"description_kind":"plain"}},"confluent_kafka_acl":{"version":2,"block":{"attributes":{"host":{"type":"string","description":"The host for the ACL.","description_kind":"markdown","required":true},"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"operation":{"type":"string","description":"The operation type for the ACL.","description_kind":"markdown","required":true},"pattern_type":{"type":"string","description":"The pattern type for the ACL.","description_kind":"markdown","required":true},"permission":{"type":"string","description":"The permission for the ACL.","description_kind":"markdown","required":true},"principal":{"type":"string","description":"The principal for the ACL.","description_kind":"markdown","required":true},"resource_name":{"type":"string","description":"The resource name for the ACL.","description_kind":"markdown","required":true},"resource_type":{"type":"string","description":"The type of the resource.","description_kind":"markdown","required":true},"rest_endpoint":{"type":"string","description":"The REST endpoint of the Kafka cluster (e.g., `https://pkc-00000.us-central1.gcp.confluent.cloud:443`).","description_kind":"markdown","optional":true}},"block_types":{"credentials":{"nesting_mode":"list","block":{"attributes":{"key":{"type":"string","description":"The Cluster API Key for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true},"secret":{"type":"string","description":"The Cluster API Secret for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true}},"description":"The Cluster API Credentials.","description_kind":"markdown"},"max_items":1},"kafka_cluster":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The Kafka cluster ID (e.g., `lkc-12345`).","description_kind":"markdown","required":true}},"description_kind":"plain"},"max_items":1}},"description_kind":"plain"}},"confluent_kafka_client_quota":{"version":0,"block":{"attributes":{"description":{"type":"string","description":"A description of the Kafka Client Quota.","description_kind":"markdown","optional":true},"display_name":{"type":"string","description":"The name of the Kafka Client Quota.","description_kind":"markdown","required":true},"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"principals":{"type":["set","string"],"description":"A list of service accounts and identity pools. 
Special name \"default\" can be used to represent the default quota for all users and service accounts.","description_kind":"markdown","required":true}},"block_types":{"environment":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The unique identifier for the environment.","description_kind":"markdown","required":true}},"description":"Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.","description_kind":"markdown"},"min_items":1,"max_items":1},"kafka_cluster":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The Kafka cluster ID (e.g., `lkc-12345`).","description_kind":"markdown","required":true}},"description_kind":"plain"},"min_items":1,"max_items":1},"throughput":{"nesting_mode":"list","block":{"attributes":{"egress_byte_rate":{"type":"string","description":"The egress throughput limit in bytes per second.","description_kind":"markdown","required":true},"ingress_byte_rate":{"type":"string","description":"The ingress throughput limit in bytes per second.","description_kind":"markdown","required":true}},"description":"Block for representing a Kafka Quota.","description_kind":"markdown"},"min_items":1,"max_items":1}},"description_kind":"plain"}},"confluent_kafka_cluster":{"version":1,"block":{"attributes":{"api_version":{"type":"string","description":"API Version defines the schema version of this representation of a Kafka cluster.","description_kind":"markdown","computed":true},"availability":{"type":"string","description":"The availability zone configuration of the Kafka cluster.","description_kind":"markdown","required":true},"bootstrap_endpoint":{"type":"string","description":"The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster.","description_kind":"markdown","computed":true},"cloud":{"type":"string","description":"The cloud service provider that runs the Kafka cluster.","description_kind":"markdown","required":true},"display_name":{"type":"string","description":"The name of the Kafka cluster.","description_kind":"markdown","required":true},"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"kind":{"type":"string","description":"Kind defines the object Kafka cluster represents.","description_kind":"markdown","computed":true},"rbac_crn":{"type":"string","description":"The Confluent Resource Name of the Kafka cluster suitable for confluent_role_binding's crn_pattern.","description_kind":"markdown","computed":true},"region":{"type":"string","description":"The cloud service provider region where the Kafka cluster is running.","description_kind":"markdown","required":true},"rest_endpoint":{"type":"string","description":"The REST endpoint of the Kafka cluster.","description_kind":"markdown","computed":true}},"block_types":{"basic":{"nesting_mode":"list","block":{"description_kind":"plain"}},"dedicated":{"nesting_mode":"list","block":{"attributes":{"cku":{"type":"number","description":"The number of Confluent Kafka Units (CKUs) for Dedicated cluster types. 
MULTI_ZONE dedicated clusters must have at least two CKUs.","description_kind":"markdown","required":true},"encryption_key":{"type":"string","description":"The ID of the encryption key that is used to encrypt the data in the Kafka cluster.","description_kind":"markdown","optional":true}},"description_kind":"plain"},"max_items":1},"environment":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The unique identifier for the environment.","description_kind":"markdown","required":true}},"description":"Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.","description_kind":"markdown"},"min_items":1,"max_items":1},"network":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The unique identifier for the network.","description_kind":"markdown","required":true}},"description":"Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.","description_kind":"markdown"},"max_items":1},"standard":{"nesting_mode":"list","block":{"description_kind":"plain"}},"timeouts":{"nesting_mode":"single","block":{"attributes":{"create":{"type":"string","description_kind":"plain","optional":true},"update":{"type":"string","description_kind":"plain","optional":true}},"description_kind":"plain"}}},"description_kind":"plain"}},"confluent_kafka_cluster_config":{"version":0,"block":{"attributes":{"config":{"type":["map","string"],"description":"The custom cluster settings to set (e.g., `\"num.partitions\" = \"8\"`).","description_kind":"markdown","required":true},"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"rest_endpoint":{"type":"string","description":"The REST endpoint of the Kafka cluster (e.g., `https://pkc-00000.us-central1.gcp.confluent.cloud:443`).","description_kind":"markdown","optional":true}},"block_types":{"credentials":{"nesting_mode":"list","block":{"attributes":{"key":{"type":"string","description":"The Cluster API Key for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true},"secret":{"type":"string","description":"The Cluster API Secret for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true}},"description":"The Cluster API Credentials.","description_kind":"markdown"},"max_items":1},"kafka_cluster":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The Kafka cluster ID (e.g., `lkc-12345`).","description_kind":"markdown","required":true}},"description_kind":"plain"},"max_items":1}},"description_kind":"plain"}},"confluent_kafka_mirror_topic":{"version":0,"block":{"attributes":{"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"mirror_topic_name":{"type":"string","description":"Name of the topic to be mirrored over the Kafka Mirror Topic, i.e. the source topic's name. 
Only required when there is a prefix configured on the link.","description_kind":"markdown","optional":true,"computed":true},"status":{"type":"string","description_kind":"plain","optional":true,"computed":true}},"block_types":{"cluster_link":{"nesting_mode":"list","block":{"attributes":{"link_name":{"type":"string","description":"The name of the Cluster Link.","description_kind":"markdown","required":true}},"description_kind":"plain"},"min_items":1,"max_items":1},"kafka_cluster":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The unique identifier for the referred Kafka cluster.","description_kind":"markdown","required":true},"rest_endpoint":{"type":"string","description":"The REST endpoint of the Kafka cluster (e.g., `https://pkc-00000.us-central1.gcp.confluent.cloud:443`).","description_kind":"markdown","optional":true}},"block_types":{"credentials":{"nesting_mode":"list","block":{"attributes":{"key":{"type":"string","description":"The Kafka API Key for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true},"secret":{"type":"string","description":"The Kafka API Secret for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true}},"description":"The Kafka API Credentials.","description_kind":"markdown"},"max_items":1}},"description_kind":"plain"},"min_items":1,"max_items":1},"source_kafka_topic":{"nesting_mode":"list","block":{"attributes":{"topic_name":{"type":"string","description":"The name of the Source Kafka topic.","description_kind":"markdown","required":true}},"description_kind":"plain"},"min_items":1,"max_items":1}},"description_kind":"plain"}},"confluent_kafka_topic":{"version":2,"block":{"attributes":{"config":{"type":["map","string"],"description":"The custom topic settings to set (e.g., `\"cleanup.policy\" = \"compact\"`).","description_kind":"markdown","optional":true,"computed":true},"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"partitions_count":{"type":"number","description":"The number of partitions to create in the topic. 
Defaults to `6`.","description_kind":"markdown","optional":true},"rest_endpoint":{"type":"string","description":"The REST endpoint of the Kafka cluster (e.g., `https://pkc-00000.us-central1.gcp.confluent.cloud:443`).","description_kind":"markdown","optional":true},"topic_name":{"type":"string","description":"The name of the topic, for example, `orders-1`.","description_kind":"markdown","required":true}},"block_types":{"credentials":{"nesting_mode":"list","block":{"attributes":{"key":{"type":"string","description":"The Cluster API Key for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true},"secret":{"type":"string","description":"The Cluster API Secret for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true}},"description":"The Cluster API Credentials.","description_kind":"markdown"},"max_items":1},"kafka_cluster":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The Kafka cluster ID (e.g., `lkc-12345`).","description_kind":"markdown","required":true}},"description_kind":"plain"},"max_items":1}},"description_kind":"plain"}},"confluent_ksql_cluster":{"version":0,"block":{"attributes":{"api_version":{"type":"string","description_kind":"plain","computed":true},"csu":{"type":"number","description":"The number of Confluent Streaming Units (CSUs) for the ksqlDB cluster.","description_kind":"markdown","required":true},"display_name":{"type":"string","description":"The name of the ksqlDB cluster.","description_kind":"markdown","required":true},"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"kind":{"type":"string","description_kind":"plain","computed":true},"resource_name":{"type":"string","description":"The Confluent Resource Name of the ksqlDB cluster.","description_kind":"markdown","computed":true},"rest_endpoint":{"type":"string","description":"The API endpoint of the ksqlDB cluster.","description_kind":"markdown","computed":true},"storage":{"type":"number","description":"The amount of storage (in GB) provisioned to the ksqlDB cluster.","description_kind":"markdown","computed":true},"topic_prefix":{"type":"string","description":"Topic name prefix used by this ksqlDB cluster. Used to assign ACLs for this ksqlDB cluster to use.","description_kind":"markdown","computed":true},"use_detailed_processing_log":{"type":"bool","description":"Controls whether the row data should be included in the processing log topic. Set it to `false` if you don't want to emit sensitive information to the processing log. Defaults to `true`.","description_kind":"markdown","optional":true}},"block_types":{"credential_identity":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The credential_identity to which this belongs.
The credential_identity can be one of iam.v2.User, iam.v2.ServiceAccount.","description_kind":"markdown","required":true}},"description_kind":"plain"},"min_items":1,"max_items":1},"environment":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The unique identifier for the environment.","description_kind":"markdown","required":true}},"description":"Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.","description_kind":"markdown"},"min_items":1,"max_items":1},"kafka_cluster":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The Kafka cluster ID (e.g., `lkc-12345`).","description_kind":"markdown","required":true}},"description_kind":"plain"},"min_items":1,"max_items":1},"timeouts":{"nesting_mode":"single","block":{"attributes":{"create":{"type":"string","description_kind":"plain","optional":true}},"description_kind":"plain"}}},"description_kind":"plain"}},"confluent_network":{"version":0,"block":{"attributes":{"cidr":{"type":"string","description":"The IPv4 CIDR block to be used for this network. Must be /16. Required for VPC peering and AWS TransitGateway.","description_kind":"markdown","optional":true,"computed":true},"cloud":{"type":"string","description":"The cloud service provider in which the network exists.","description_kind":"markdown","required":true},"connection_types":{"type":["list","string"],"description_kind":"plain","required":true},"display_name":{"type":"string","description":"The name of the Network.","description_kind":"markdown","optional":true,"computed":true},"dns_domain":{"type":"string","description":"The root DNS domain for the network if applicable. Present on networks that support PrivateLink.","description_kind":"markdown","computed":true},"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"region":{"type":"string","description":"The cloud service provider region where the network exists.","description_kind":"markdown","required":true},"resource_name":{"type":"string","description":"The Confluent Resource Name of the Network.","description_kind":"markdown","computed":true},"zonal_subdomains":{"type":["map","string"],"description":"The DNS subdomain for each zone. Present on networks that support PrivateLink. Keys are zones and values are DNS domains.","description_kind":"markdown","computed":true},"zones":{"type":["list","string"],"description":"The 3 availability zones for this network. They can optionally be specified for only AWS networks used with PrivateLink.
Otherwise, they are automatically chosen by Confluent Cloud.","description_kind":"markdown","optional":true,"computed":true}},"block_types":{"aws":{"nesting_mode":"list","block":{"attributes":{"account":{"type":"string","description":"The AWS account ID associated with the Confluent Cloud VPC.","description_kind":"markdown","computed":true},"private_link_endpoint_service":{"type":"string","description":"The endpoint service of the Confluent Cloud VPC (used for PrivateLink) if available.","description_kind":"markdown","computed":true},"vpc":{"type":"string","description":"The Confluent Cloud VPC ID.","description_kind":"markdown","computed":true}},"description_kind":"plain"}},"azure":{"nesting_mode":"list","block":{"attributes":{"private_link_service_aliases":{"type":["map","string"],"description_kind":"plain","computed":true}},"description_kind":"plain"}},"dns_config":{"nesting_mode":"list","block":{"attributes":{"resolution":{"type":"string","description":"Network DNS resolution.","description_kind":"markdown","required":true}},"description":"Network DNS config. It applies only to the PRIVATELINK network connection type.","description_kind":"markdown"},"max_items":1},"environment":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The unique identifier for the environment.","description_kind":"markdown","required":true}},"description":"Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.","description_kind":"markdown"},"min_items":1,"max_items":1},"gcp":{"nesting_mode":"list","block":{"attributes":{"private_service_connect_service_attachments":{"type":["map","string"],"description":"The mapping of zones to Private Service Connect service attachments if available. 
Keys are zones and values are [GCP Private Service Connect service attachment](https://cloud.google.com/vpc/docs/configure-private-service-connect-producer#api_7).","description_kind":"markdown","computed":true},"project":{"type":"string","description":"The GCP project.","description_kind":"markdown","computed":true},"vpc_network":{"type":"string","description":"The GCP VPC network name.","description_kind":"markdown","computed":true}},"description_kind":"plain"}},"timeouts":{"nesting_mode":"single","block":{"attributes":{"create":{"type":"string","description_kind":"plain","optional":true},"delete":{"type":"string","description_kind":"plain","optional":true}},"description_kind":"plain"}}},"description_kind":"plain"}},"confluent_peering":{"version":0,"block":{"attributes":{"display_name":{"type":"string","description":"The name of the Peering.","description_kind":"markdown","optional":true,"computed":true},"id":{"type":"string","description_kind":"plain","optional":true,"computed":true}},"block_types":{"aws":{"nesting_mode":"list","block":{"attributes":{"account":{"type":"string","description":"AWS account for VPC to peer with the network.","description_kind":"markdown","required":true},"customer_region":{"type":"string","description":"Region of customer VPC.","description_kind":"markdown","required":true},"routes":{"type":["list","string"],"description":"List of routes for the peering.","description_kind":"markdown","required":true},"vpc":{"type":"string","description":"The id of the AWS VPC to peer with.","description_kind":"markdown","required":true}},"description_kind":"plain"},"max_items":1},"azure":{"nesting_mode":"list","block":{"attributes":{"customer_region":{"type":"string","description":"Region of customer VNet.","description_kind":"markdown","required":true},"tenant":{"type":"string","description":"Customer Azure tenant.","description_kind":"markdown","required":true},"vnet":{"type":"string","description":"Customer VNet to peer with.","description_kind":"markdown","required":true}},"description_kind":"plain"},"max_items":1},"environment":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The unique identifier for the environment.","description_kind":"markdown","required":true}},"description":"Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.","description_kind":"markdown"},"min_items":1,"max_items":1},"gcp":{"nesting_mode":"list","block":{"attributes":{"import_custom_routes":{"type":"bool","description":"Enable customer route import. Defaults to `false`.","description_kind":"markdown","optional":true},"project":{"type":"string","description":"The name of the GCP project.","description_kind":"markdown","required":true},"vpc_network":{"type":"string","description":"The name of the GCP VPC network to peer with.","description_kind":"markdown","required":true}},"description_kind":"plain"},"max_items":1},"network":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The unique identifier for the network.","description_kind":"markdown","required":true}},"description":"Network represents a network (VPC) in Confluent Cloud. 
All Networks exist within Confluent-managed cloud provider accounts.","description_kind":"markdown"},"min_items":1,"max_items":1},"timeouts":{"nesting_mode":"single","block":{"attributes":{"create":{"type":"string","description_kind":"plain","optional":true},"delete":{"type":"string","description_kind":"plain","optional":true}},"description_kind":"plain"}}},"description_kind":"plain"}},"confluent_private_link_access":{"version":0,"block":{"attributes":{"display_name":{"type":"string","description":"The name of the PrivateLink access.","description_kind":"markdown","optional":true,"computed":true},"id":{"type":"string","description_kind":"plain","optional":true,"computed":true}},"block_types":{"aws":{"nesting_mode":"list","block":{"attributes":{"account":{"type":"string","description":"AWS Account ID to allow for PrivateLink access. Find here (https://console.aws.amazon.com/billing/home?#/account) under My Account in your AWS Management Console.","description_kind":"markdown","required":true}},"description_kind":"plain"},"max_items":1},"azure":{"nesting_mode":"list","block":{"attributes":{"subscription":{"type":"string","description":"Azure subscription to allow for PrivateLink access.","description_kind":"markdown","required":true}},"description_kind":"plain"},"max_items":1},"environment":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The unique identifier for the environment.","description_kind":"markdown","required":true}},"description":"Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.","description_kind":"markdown"},"min_items":1,"max_items":1},"gcp":{"nesting_mode":"list","block":{"attributes":{"project":{"type":"string","description":"The GCP project ID to allow for Private Service Connect access.","description_kind":"markdown","required":true}},"description_kind":"plain"},"max_items":1},"network":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The unique identifier for the network.","description_kind":"markdown","required":true}},"description":"Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.","description_kind":"markdown"},"min_items":1,"max_items":1},"timeouts":{"nesting_mode":"single","block":{"attributes":{"create":{"type":"string","description_kind":"plain","optional":true},"delete":{"type":"string","description_kind":"plain","optional":true}},"description_kind":"plain"}}},"description_kind":"plain"}},"confluent_role_binding":{"version":0,"block":{"attributes":{"crn_pattern":{"type":"string","description":"A CRN that specifies the scope and resource patterns necessary for the role to bind.","description_kind":"markdown","required":true},"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"principal":{"type":"string","description":"The principal User to bind the role to.","description_kind":"markdown","required":true},"role_name":{"type":"string","description":"The name of the role to bind to the principal.","description_kind":"markdown","required":true}},"description_kind":"plain"}},"confluent_schema":{"version":0,"block":{"attributes":{"format":{"type":"string","description":"The format of the Schema.","description_kind":"markdown","required":true},"hard_delete":{"type":"bool","description":"Controls whether a schema should be soft or hard deleted. Set it to `true` if you want to hard delete a schema on destroy. 
Defaults to `false` (soft delete).","description_kind":"markdown","optional":true},"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"recreate_on_update":{"type":"bool","description":"Controls whether a schema should be recreated on update. Defaults to `false`.","description_kind":"markdown","optional":true},"rest_endpoint":{"type":"string","description":"The REST endpoint of the Schema Registry cluster, for example, `https://psrc-00000.us-central1.gcp.confluent.cloud:443`.","description_kind":"markdown","optional":true},"schema":{"type":"string","description":"The definition of the Schema.","description_kind":"markdown","required":true},"schema_identifier":{"type":"number","description":"Globally unique identifier of the Schema returned for a creation request. It should be used to retrieve this schema from the schemas resource and is different from the schema’s version which is associated with the subject.","description_kind":"markdown","computed":true},"subject_name":{"type":"string","description":"The name of the Schema Registry Subject.","description_kind":"markdown","required":true},"version":{"type":"number","description":"The version number of the Schema.","description_kind":"markdown","computed":true}},"block_types":{"credentials":{"nesting_mode":"list","block":{"attributes":{"key":{"type":"string","description":"The Cluster API Key for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true},"secret":{"type":"string","description":"The Cluster API Secret for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true}},"description":"The Cluster API Credentials.","description_kind":"markdown"},"max_items":1},"schema_reference":{"nesting_mode":"list","block":{"attributes":{"name":{"type":"string","description":"The name of the Schema references (for example, \"io.confluent.kafka.example.User\").
For Avro, the reference name is the fully qualified schema name, for JSON Schema it is a URL, and for Protobuf, it is the name of another Protobuf file.","description_kind":"markdown","required":true},"subject_name":{"type":"string","description":"The name of the referenced Schema Registry Subject (for example, \"User\").","description_kind":"markdown","required":true},"version":{"type":"number","description":"The version of the referenced Schema.","description_kind":"markdown","required":true}},"description":"The list of references to other Schemas.","description_kind":"markdown"}},"schema_registry_cluster":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The Schema Registry cluster ID (e.g., `lsrc-abc123`).","description_kind":"markdown","required":true}},"description_kind":"plain"},"max_items":1}},"description_kind":"plain"}},"confluent_schema_registry_cluster":{"version":0,"block":{"attributes":{"api_version":{"type":"string","description":"API Version defines the schema version of this representation of a Schema Registry Cluster.","description_kind":"markdown","computed":true},"display_name":{"type":"string","description":"The name of the Schema Registry Cluster.","description_kind":"markdown","computed":true},"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"kind":{"type":"string","description":"Kind defines the object Schema Registry Cluster represents.","description_kind":"markdown","computed":true},"package":{"type":"string","description":"The billing package.","description_kind":"markdown","required":true},"resource_name":{"type":"string","description":"The Confluent Resource Name of the Schema Registry Cluster.","description_kind":"markdown","computed":true},"rest_endpoint":{"type":"string","description":"The API endpoint of the Schema Registry Cluster.","description_kind":"markdown","computed":true}},"block_types":{"environment":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The unique identifier for the environment.","description_kind":"markdown","required":true}},"description":"Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.","description_kind":"markdown"},"min_items":1,"max_items":1},"region":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The unique identifier for the Schema Registry Region.","description_kind":"markdown","required":true}},"description_kind":"plain"},"min_items":1,"max_items":1}},"description_kind":"plain"}},"confluent_schema_registry_cluster_config":{"version":0,"block":{"attributes":{"compatibility_level":{"type":"string","description_kind":"plain","optional":true,"computed":true},"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"rest_endpoint":{"type":"string","description":"The REST endpoint of the Schema Registry cluster, for example, `https://psrc-00000.us-central1.gcp.confluent.cloud:443`).","description_kind":"markdown","optional":true}},"block_types":{"credentials":{"nesting_mode":"list","block":{"attributes":{"key":{"type":"string","description":"The Cluster API Key for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true},"secret":{"type":"string","description":"The Cluster API Secret for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true}},"description":"The Cluster API 
Credentials.","description_kind":"markdown"},"max_items":1},"schema_registry_cluster":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The Schema Registry cluster ID (e.g., `lsrc-abc123`).","description_kind":"markdown","required":true}},"description_kind":"plain"},"max_items":1}},"description_kind":"plain"}},"confluent_schema_registry_cluster_mode":{"version":0,"block":{"attributes":{"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"mode":{"type":"string","description_kind":"plain","optional":true,"computed":true},"rest_endpoint":{"type":"string","description":"The REST endpoint of the Schema Registry cluster, for example, `https://psrc-00000.us-central1.gcp.confluent.cloud:443`).","description_kind":"markdown","optional":true}},"block_types":{"credentials":{"nesting_mode":"list","block":{"attributes":{"key":{"type":"string","description":"The Cluster API Key for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true},"secret":{"type":"string","description":"The Cluster API Secret for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true}},"description":"The Cluster API Credentials.","description_kind":"markdown"},"max_items":1},"schema_registry_cluster":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The Schema Registry cluster ID (e.g., `lsrc-abc123`).","description_kind":"markdown","required":true}},"description_kind":"plain"},"max_items":1}},"description_kind":"plain"}},"confluent_service_account":{"version":0,"block":{"attributes":{"api_version":{"type":"string","description":"API Version defines the schema version of this representation of a Service Account.","description_kind":"markdown","computed":true},"description":{"type":"string","description":"A free-form description of the Service Account.","description_kind":"markdown","optional":true},"display_name":{"type":"string","description":"A human-readable name for the Service Account.","description_kind":"markdown","required":true},"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"kind":{"type":"string","description":"Kind defines the object Service Account represents.","description_kind":"markdown","computed":true}},"description_kind":"plain"}},"confluent_subject_config":{"version":0,"block":{"attributes":{"compatibility_level":{"type":"string","description_kind":"plain","optional":true,"computed":true},"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"rest_endpoint":{"type":"string","description":"The REST endpoint of the Schema Registry cluster, for example, `https://psrc-00000.us-central1.gcp.confluent.cloud:443`).","description_kind":"markdown","optional":true},"subject_name":{"type":"string","description":"The name of the Schema Registry Subject.","description_kind":"markdown","required":true}},"block_types":{"credentials":{"nesting_mode":"list","block":{"attributes":{"key":{"type":"string","description":"The Cluster API Key for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true},"secret":{"type":"string","description":"The Cluster API Secret for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true}},"description":"The Cluster API Credentials.","description_kind":"markdown"},"max_items":1},"schema_registry_cluster":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The 
Schema Registry cluster ID (e.g., `lsrc-abc123`).","description_kind":"markdown","required":true}},"description_kind":"plain"},"max_items":1}},"description_kind":"plain"}},"confluent_subject_mode":{"version":0,"block":{"attributes":{"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"mode":{"type":"string","description_kind":"plain","optional":true,"computed":true},"rest_endpoint":{"type":"string","description":"The REST endpoint of the Schema Registry cluster, for example, `https://psrc-00000.us-central1.gcp.confluent.cloud:443`).","description_kind":"markdown","optional":true},"subject_name":{"type":"string","description":"The name of the Schema Registry Subject.","description_kind":"markdown","required":true}},"block_types":{"credentials":{"nesting_mode":"list","block":{"attributes":{"key":{"type":"string","description":"The Cluster API Key for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true},"secret":{"type":"string","description":"The Cluster API Secret for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true}},"description":"The Cluster API Credentials.","description_kind":"markdown"},"max_items":1},"schema_registry_cluster":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The Schema Registry cluster ID (e.g., `lsrc-abc123`).","description_kind":"markdown","required":true}},"description_kind":"plain"},"max_items":1}},"description_kind":"plain"}},"confluent_transit_gateway_attachment":{"version":0,"block":{"attributes":{"display_name":{"type":"string","description":"The name of the Transit Gateway Attachment.","description_kind":"markdown","optional":true,"computed":true},"id":{"type":"string","description_kind":"plain","optional":true,"computed":true}},"block_types":{"aws":{"nesting_mode":"list","block":{"attributes":{"ram_resource_share_arn":{"type":"string","description":"The Amazon Resource Name (ARN) of the Resource Access Manager (RAM) Resource Share of the transit gateway your Confluent Cloud network attaches to.","description_kind":"markdown","required":true},"routes":{"type":["list","string"],"description":"List of destination routes for traffic from Confluent VPC to customer VPC via Transit Gateway.","description_kind":"markdown","required":true},"transit_gateway_attachment_id":{"type":"string","description":"The ID of the AWS Transit Gateway VPC Attachment that attaches Confluent VPC to Transit Gateway.","description_kind":"markdown","computed":true},"transit_gateway_id":{"type":"string","description":"The ID of the AWS Transit Gateway that your Confluent Cloud network attaches to.","description_kind":"markdown","required":true}},"description_kind":"plain"},"max_items":1},"environment":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The unique identifier for the environment.","description_kind":"markdown","required":true}},"description":"Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.","description_kind":"markdown"},"min_items":1,"max_items":1},"network":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The unique identifier for the network.","description_kind":"markdown","required":true}},"description":"Network represents a network (VPC) in Confluent Cloud. 
All Networks exist within Confluent-managed cloud provider accounts.","description_kind":"markdown"},"min_items":1,"max_items":1},"timeouts":{"nesting_mode":"single","block":{"attributes":{"create":{"type":"string","description_kind":"plain","optional":true},"delete":{"type":"string","description_kind":"plain","optional":true}},"description_kind":"plain"}}},"description_kind":"plain"}}},"data_source_schemas":{"confluent_environment":{"version":0,"block":{"attributes":{"display_name":{"type":"string","description":"A human-readable name for the Environment.","description_kind":"markdown","optional":true,"computed":true},"id":{"type":"string","description":"The ID of the Environment (e.g., `env-abc123`).","description_kind":"markdown","optional":true,"computed":true},"resource_name":{"type":"string","description":"The Confluent Resource Name of the Environment.","description_kind":"markdown","computed":true}},"description_kind":"plain"}},"confluent_identity_pool":{"version":0,"block":{"attributes":{"description":{"type":"string","description_kind":"plain","computed":true},"display_name":{"type":"string","description_kind":"plain","optional":true,"computed":true},"filter":{"type":"string","description_kind":"plain","computed":true},"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"identity_claim":{"type":"string","description_kind":"plain","computed":true}},"block_types":{"identity_provider":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description_kind":"plain","required":true}},"description_kind":"plain"},"min_items":1,"max_items":1}},"description_kind":"plain"}},"confluent_identity_provider":{"version":0,"block":{"attributes":{"description":{"type":"string","description":"A description of the Identity Provider.","description_kind":"markdown","computed":true},"display_name":{"type":"string","description":"A name for the Identity Provider.","description_kind":"markdown","optional":true,"computed":true},"id":{"type":"string","description":"The ID of the Identity Provider (e.g., `op-abc123`).","description_kind":"markdown","optional":true,"computed":true},"issuer":{"type":"string","description":"A publicly reachable issuer URI for the Identity Provider.","description_kind":"markdown","computed":true},"jwks_uri":{"type":"string","description":"A publicly reachable JWKS URI for the Identity Provider.","description_kind":"markdown","computed":true}},"description_kind":"plain"}},"confluent_kafka_client_quota":{"version":0,"block":{"attributes":{"description":{"type":"string","description":"A description of the Kafka Client Quota.","description_kind":"markdown","computed":true},"display_name":{"type":"string","description":"The name of the Kafka Client Quota.","description_kind":"markdown","computed":true},"environment":{"type":["list",["object",{"id":"string"}]],"description_kind":"plain","computed":true},"id":{"type":"string","description":"The ID of the Kafka Client Quota (e.g., `rb-abc123`).","description_kind":"markdown","required":true},"kafka_cluster":{"type":["list",["object",{"id":"string"}]],"description_kind":"plain","computed":true},"principals":{"type":["set","string"],"description":"A list of service accounts and identity pools. 
Special name \"default\" can be used to represent the default quota for all users and service accounts.","description_kind":"markdown","computed":true},"throughput":{"type":["list",["object",{"egress_byte_rate":"string","ingress_byte_rate":"string"}]],"description":"Block for representing a Kafka Quota.","description_kind":"markdown","computed":true}},"description_kind":"plain"}},"confluent_kafka_cluster":{"version":1,"block":{"attributes":{"api_version":{"type":"string","description_kind":"plain","computed":true},"availability":{"type":"string","description_kind":"plain","computed":true},"bootstrap_endpoint":{"type":"string","description_kind":"plain","computed":true},"cloud":{"type":"string","description_kind":"plain","computed":true},"display_name":{"type":"string","description_kind":"plain","optional":true,"computed":true},"id":{"type":"string","description":"The ID of the Kafka cluster, for example, `lkc-abc123`.","description_kind":"markdown","optional":true,"computed":true},"kind":{"type":"string","description_kind":"plain","computed":true},"network":{"type":["list",["object",{"id":"string"}]],"description":"Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.","description_kind":"markdown","computed":true},"rbac_crn":{"type":"string","description_kind":"plain","computed":true},"region":{"type":"string","description_kind":"plain","computed":true},"rest_endpoint":{"type":"string","description_kind":"plain","computed":true}},"block_types":{"basic":{"nesting_mode":"list","block":{"description_kind":"plain"}},"dedicated":{"nesting_mode":"list","block":{"attributes":{"cku":{"type":"number","description":"The number of Confluent Kafka Units (CKUs) for Dedicated cluster types. MULTI_ZONE dedicated clusters must have at least two CKUs.","description_kind":"markdown","computed":true},"encryption_key":{"type":"string","description":"The ID of the encryption key that is used to encrypt the data in the Kafka cluster.","description_kind":"markdown","computed":true}},"description_kind":"plain"},"max_items":1},"environment":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description_kind":"plain","required":true}},"description_kind":"plain"},"min_items":1,"max_items":1},"standard":{"nesting_mode":"list","block":{"description_kind":"plain"}}},"description_kind":"plain"}},"confluent_kafka_topic":{"version":2,"block":{"attributes":{"config":{"type":["map","string"],"description_kind":"plain","computed":true},"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"partitions_count":{"type":"number","description_kind":"plain","computed":true},"rest_endpoint":{"type":"string","description_kind":"plain","required":true},"topic_name":{"type":"string","description_kind":"plain","required":true}},"block_types":{"credentials":{"nesting_mode":"list","block":{"attributes":{"key":{"type":"string","description":"The Cluster API Key for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true},"secret":{"type":"string","description":"The Cluster API Secret for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true}},"description":"The Cluster API 
Credentials.","description_kind":"markdown"},"max_items":1},"kafka_cluster":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description_kind":"plain","required":true}},"description_kind":"plain"},"max_items":1}},"description_kind":"plain"}},"confluent_ksql_cluster":{"version":0,"block":{"attributes":{"api_version":{"type":"string","description_kind":"plain","computed":true},"credential_identity":{"type":["list",["object",{"id":"string"}]],"description_kind":"plain","computed":true},"csu":{"type":"number","description_kind":"plain","computed":true},"display_name":{"type":"string","description_kind":"plain","optional":true,"computed":true},"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"kafka_cluster":{"type":["list",["object",{"id":"string"}]],"description_kind":"plain","computed":true},"kind":{"type":"string","description_kind":"plain","computed":true},"resource_name":{"type":"string","description_kind":"plain","computed":true},"rest_endpoint":{"type":"string","description":"The dataplane endpoint of the ksqlDB cluster.","description_kind":"markdown","computed":true},"storage":{"type":"number","description":"Amount of storage (in GB) provisioned to this cluster.","description_kind":"markdown","computed":true},"topic_prefix":{"type":"string","description":"Topic name prefix used by this ksqlDB cluster. Used to assign ACLs for this ksqlDB cluster to use.","description_kind":"markdown","computed":true},"use_detailed_processing_log":{"type":"bool","description_kind":"plain","computed":true}},"block_types":{"environment":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description_kind":"plain","required":true}},"description_kind":"plain"},"min_items":1,"max_items":1}},"description_kind":"plain"}},"confluent_network":{"version":0,"block":{"attributes":{"cidr":{"type":"string","description_kind":"plain","computed":true},"cloud":{"type":"string","description_kind":"plain","computed":true},"connection_types":{"type":["list","string"],"description_kind":"plain","computed":true},"display_name":{"type":"string","description_kind":"plain","optional":true,"computed":true},"dns_domain":{"type":"string","description_kind":"plain","computed":true},"id":{"type":"string","description":"The ID of the Network, for example, `n-abc123`.","description_kind":"markdown","optional":true,"computed":true},"region":{"type":"string","description_kind":"plain","computed":true},"resource_name":{"type":"string","description_kind":"plain","computed":true},"zonal_subdomains":{"type":["map","string"],"description":"The DNS subdomain for each zone. Present on networks that support PrivateLink. 
Keys are zones and values are DNS domains.","description_kind":"markdown","computed":true},"zones":{"type":["list","string"],"description_kind":"plain","computed":true}},"block_types":{"aws":{"nesting_mode":"list","block":{"attributes":{"account":{"type":"string","description":"The AWS account ID associated with the Confluent Cloud VPC.","description_kind":"markdown","computed":true},"private_link_endpoint_service":{"type":"string","description":"The endpoint service of the Confluent Cloud VPC (used for PrivateLink) if available.","description_kind":"markdown","computed":true},"vpc":{"type":"string","description":"The Confluent Cloud VPC ID.","description_kind":"markdown","computed":true}},"description_kind":"plain"}},"azure":{"nesting_mode":"list","block":{"attributes":{"private_link_service_aliases":{"type":["map","string"],"description_kind":"plain","computed":true}},"description_kind":"plain"}},"dns_config":{"nesting_mode":"list","block":{"attributes":{"resolution":{"type":"string","description":"Network DNS resolution.","description_kind":"markdown","computed":true}},"description":"Network DNS config. It applies only to the PRIVATELINK network connection type.","description_kind":"markdown"}},"environment":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description_kind":"plain","required":true}},"description_kind":"plain"},"min_items":1,"max_items":1},"gcp":{"nesting_mode":"list","block":{"attributes":{"private_service_connect_service_attachments":{"type":["map","string"],"description":"The mapping of zones to Private Service Connect service attachments if available. Keys are zones and values are [GCP Private Service Connect service attachment](https://cloud.google.com/vpc/docs/configure-private-service-connect-producer#api_7).","description_kind":"markdown","computed":true},"project":{"type":"string","description":"The GCP project.","description_kind":"markdown","computed":true},"vpc_network":{"type":"string","description":"The GCP VPC network name.","description_kind":"markdown","computed":true}},"description_kind":"plain"}}},"description_kind":"plain"}},"confluent_organization":{"version":0,"block":{"attributes":{"id":{"type":"string","description":"The ID of the Organization (e.g., `1111aaaa-11aa-11aa-11aa-111111aaaaaa`).","description_kind":"markdown","computed":true},"resource_name":{"type":"string","description":"The Confluent Resource Name of the Organization.","description_kind":"markdown","computed":true}},"description_kind":"plain"}},"confluent_peering":{"version":0,"block":{"attributes":{"aws":{"type":["list",["object",{"account":"string","customer_region":"string","routes":["list","string"],"vpc":"string"}]],"description_kind":"plain","computed":true},"azure":{"type":["list",["object",{"customer_region":"string","tenant":"string","vnet":"string"}]],"description_kind":"plain","computed":true},"display_name":{"type":"string","description_kind":"plain","optional":true,"computed":true},"gcp":{"type":["list",["object",{"import_custom_routes":"bool","project":"string","vpc_network":"string"}]],"description_kind":"plain","computed":true},"id":{"type":"string","description":"The ID of the Peering, for example, 
`pla-abc123`.","description_kind":"markdown","optional":true,"computed":true},"network":{"type":["list",["object",{"id":"string"}]],"description_kind":"plain","computed":true}},"block_types":{"environment":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description_kind":"plain","required":true}},"description_kind":"plain"},"min_items":1,"max_items":1}},"description_kind":"plain"}},"confluent_private_link_access":{"version":0,"block":{"attributes":{"aws":{"type":["list",["object",{"account":"string"}]],"description_kind":"plain","computed":true},"azure":{"type":["list",["object",{"subscription":"string"}]],"description_kind":"plain","computed":true},"display_name":{"type":"string","description_kind":"plain","optional":true,"computed":true},"gcp":{"type":["list",["object",{"project":"string"}]],"description_kind":"plain","computed":true},"id":{"type":"string","description":"The ID of the Private Link Access, for example, `pla-abc123`.","description_kind":"markdown","optional":true,"computed":true},"network":{"type":["list",["object",{"id":"string"}]],"description_kind":"plain","computed":true}},"block_types":{"environment":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description_kind":"plain","required":true}},"description_kind":"plain"},"min_items":1,"max_items":1}},"description_kind":"plain"}},"confluent_role_binding":{"version":0,"block":{"attributes":{"crn_pattern":{"type":"string","description":"A CRN that specifies the scope and resource patterns necessary for the role to bind.","description_kind":"markdown","computed":true},"id":{"type":"string","description":"The ID of the Role Binding (e.g., `rb-abc123`).","description_kind":"markdown","required":true},"principal":{"type":"string","description":"The principal User to bind the role to.","description_kind":"markdown","computed":true},"role_name":{"type":"string","description":"The name of the role to bind to the principal.","description_kind":"markdown","computed":true}},"description_kind":"plain"}},"confluent_schema":{"version":0,"block":{"attributes":{"format":{"type":"string","description":"The format of the Schema.","description_kind":"markdown","computed":true},"hard_delete":{"type":"bool","description":"Controls whether a schema should be soft or hard deleted. Set it to `true` if you want to hard delete a schema on destroy. Defaults to `false` (soft delete).","description_kind":"markdown","computed":true},"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"recreate_on_update":{"type":"bool","description":"Controls whether a schema should be recreated on update.","description_kind":"markdown","computed":true},"rest_endpoint":{"type":"string","description":"The REST endpoint of the Schema Registry cluster, for example, `https://psrc-00000.us-central1.gcp.confluent.cloud:443`).","description_kind":"markdown","optional":true},"schema":{"type":"string","description":"The definition of the Schema.","description_kind":"markdown","computed":true},"schema_identifier":{"type":"number","description":"Globally unique identifier of the Schema returned for a creation request. 
It should be used to retrieve this schema from the schemas resource and is different from the schema’s version which is associated with the subject.","description_kind":"markdown","required":true},"schema_reference":{"type":["list",["object",{"name":"string","subject_name":"string","version":"number"}]],"description":"The list of references to other Schemas.","description_kind":"markdown","computed":true},"subject_name":{"type":"string","description":"The name of the Schema Registry Subject.","description_kind":"markdown","required":true},"version":{"type":"number","description":"The version number of the Schema.","description_kind":"markdown","computed":true}},"block_types":{"credentials":{"nesting_mode":"list","block":{"attributes":{"key":{"type":"string","description":"The Cluster API Key for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true},"secret":{"type":"string","description":"The Cluster API Secret for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true}},"description":"The Cluster API Credentials.","description_kind":"markdown"},"max_items":1},"schema_registry_cluster":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The Schema Registry cluster ID (e.g., `lsrc-abc123`).","description_kind":"markdown","required":true}},"description_kind":"plain"},"max_items":1}},"description_kind":"plain"}},"confluent_schema_registry_cluster":{"version":0,"block":{"attributes":{"api_version":{"type":"string","description":"API Version defines the schema version of this representation of a Schema Registry Cluster.","description_kind":"markdown","computed":true},"display_name":{"type":"string","description_kind":"plain","optional":true,"computed":true},"id":{"type":"string","description":"The ID of the Schema Registry cluster, for example, `lsrc-755ogo`.","description_kind":"markdown","optional":true,"computed":true},"kind":{"type":"string","description":"Kind defines the object Schema Registry Cluster represents.","description_kind":"markdown","computed":true},"package":{"type":"string","description":"The billing package.","description_kind":"markdown","computed":true},"region":{"type":["list",["object",{"id":"string"}]],"description_kind":"plain","computed":true},"resource_name":{"type":"string","description":"The Confluent Resource Name of the Schema Registry Cluster.","description_kind":"markdown","computed":true},"rest_endpoint":{"type":"string","description":"The API endpoint of the Schema Registry Cluster.","description_kind":"markdown","computed":true}},"block_types":{"environment":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description_kind":"plain","required":true}},"description_kind":"plain"},"min_items":1,"max_items":1}},"description_kind":"plain"}},"confluent_schema_registry_cluster_config":{"version":0,"block":{"attributes":{"compatibility_level":{"type":"string","description_kind":"plain","computed":true},"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"rest_endpoint":{"type":"string","description":"The REST endpoint of the Schema Registry cluster, for example, `https://psrc-00000.us-central1.gcp.confluent.cloud:443`).","description_kind":"markdown","optional":true}},"block_types":{"credentials":{"nesting_mode":"list","block":{"attributes":{"key":{"type":"string","description":"The Cluster API Key for your Confluent Cloud 
cluster.","description_kind":"markdown","required":true,"sensitive":true},"secret":{"type":"string","description":"The Cluster API Secret for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true}},"description":"The Cluster API Credentials.","description_kind":"markdown"},"max_items":1},"schema_registry_cluster":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The Schema Registry cluster ID (e.g., `lsrc-abc123`).","description_kind":"markdown","required":true}},"description_kind":"plain"},"max_items":1}},"description_kind":"plain"}},"confluent_schema_registry_cluster_mode":{"version":0,"block":{"attributes":{"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"mode":{"type":"string","description_kind":"plain","computed":true},"rest_endpoint":{"type":"string","description":"The REST endpoint of the Schema Registry cluster, for example, `https://psrc-00000.us-central1.gcp.confluent.cloud:443`).","description_kind":"markdown","optional":true}},"block_types":{"credentials":{"nesting_mode":"list","block":{"attributes":{"key":{"type":"string","description":"The Cluster API Key for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true},"secret":{"type":"string","description":"The Cluster API Secret for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true}},"description":"The Cluster API Credentials.","description_kind":"markdown"},"max_items":1},"schema_registry_cluster":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The Schema Registry cluster ID (e.g., `lsrc-abc123`).","description_kind":"markdown","required":true}},"description_kind":"plain"},"max_items":1}},"description_kind":"plain"}},"confluent_schema_registry_region":{"version":0,"block":{"attributes":{"cloud":{"type":"string","description_kind":"plain","required":true},"id":{"type":"string","description":"The ID of the Schema Registry Region (e.g., `sgreg-123`).","description_kind":"markdown","computed":true},"package":{"type":"string","description":"The billing package.","description_kind":"markdown","required":true},"region":{"type":"string","description_kind":"plain","required":true}},"description_kind":"plain"}},"confluent_service_account":{"version":0,"block":{"attributes":{"api_version":{"type":"string","description":"API Version defines the schema version of this representation of a Service Account.","description_kind":"markdown","computed":true},"description":{"type":"string","description":"A free-form description of the Service Account.","description_kind":"markdown","computed":true},"display_name":{"type":"string","description":"A human-readable name for the Service Account.","description_kind":"markdown","optional":true,"computed":true},"id":{"type":"string","description":"The ID of the Service Account (e.g., `sa-abc123`).","description_kind":"markdown","optional":true,"computed":true},"kind":{"type":"string","description":"Kind defines the object Service Account represents.","description_kind":"markdown","computed":true}},"description_kind":"plain"}},"confluent_subject_config":{"version":0,"block":{"attributes":{"compatibility_level":{"type":"string","description_kind":"plain","computed":true},"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"rest_endpoint":{"type":"string","description":"The REST endpoint of the Schema Registry cluster, for example, 
`https://psrc-00000.us-central1.gcp.confluent.cloud:443`).","description_kind":"markdown","optional":true},"subject_name":{"type":"string","description":"The name of the Schema Registry Subject.","description_kind":"markdown","required":true}},"block_types":{"credentials":{"nesting_mode":"list","block":{"attributes":{"key":{"type":"string","description":"The Cluster API Key for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true},"secret":{"type":"string","description":"The Cluster API Secret for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true}},"description":"The Cluster API Credentials.","description_kind":"markdown"},"max_items":1},"schema_registry_cluster":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The Schema Registry cluster ID (e.g., `lsrc-abc123`).","description_kind":"markdown","required":true}},"description_kind":"plain"},"max_items":1}},"description_kind":"plain"}},"confluent_subject_mode":{"version":0,"block":{"attributes":{"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"mode":{"type":"string","description_kind":"plain","computed":true},"rest_endpoint":{"type":"string","description":"The REST endpoint of the Schema Registry cluster, for example, `https://psrc-00000.us-central1.gcp.confluent.cloud:443`).","description_kind":"markdown","optional":true},"subject_name":{"type":"string","description":"The name of the Schema Registry Subject.","description_kind":"markdown","required":true}},"block_types":{"credentials":{"nesting_mode":"list","block":{"attributes":{"key":{"type":"string","description":"The Cluster API Key for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true},"secret":{"type":"string","description":"The Cluster API Secret for your Confluent Cloud cluster.","description_kind":"markdown","required":true,"sensitive":true}},"description":"The Cluster API Credentials.","description_kind":"markdown"},"max_items":1},"schema_registry_cluster":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description":"The Schema Registry cluster ID (e.g., `lsrc-abc123`).","description_kind":"markdown","required":true}},"description_kind":"plain"},"max_items":1}},"description_kind":"plain"}},"confluent_transit_gateway_attachment":{"version":0,"block":{"attributes":{"aws":{"type":["list",["object",{"ram_resource_share_arn":"string","routes":["list","string"],"transit_gateway_attachment_id":"string","transit_gateway_id":"string"}]],"description_kind":"plain","computed":true},"display_name":{"type":"string","description_kind":"plain","optional":true,"computed":true},"id":{"type":"string","description":"The ID of the TransitGatewayAttachment, for example, `pla-abc123`.","description_kind":"markdown","optional":true,"computed":true},"network":{"type":["list",["object",{"id":"string"}]],"description_kind":"plain","computed":true}},"block_types":{"environment":{"nesting_mode":"list","block":{"attributes":{"id":{"type":"string","description_kind":"plain","required":true}},"description_kind":"plain"},"min_items":1,"max_items":1}},"description_kind":"plain"}},"confluent_user":{"version":0,"block":{"attributes":{"api_version":{"type":"string","description":"API Version defines the schema version of this representation of a User.","description_kind":"markdown","computed":true},"email":{"type":"string","description":"The email address of the 
User.","description_kind":"markdown","optional":true,"computed":true},"full_name":{"type":"string","description":"The full name of the User.","description_kind":"markdown","optional":true,"computed":true},"id":{"type":"string","description":"The ID of the User (e.g., `u-abc123`).","description_kind":"markdown","optional":true,"computed":true},"kind":{"type":"string","description":"Kind defines the object User represents.","description_kind":"markdown","computed":true}},"description_kind":"plain"}}}}}} diff --git a/examples/install.yaml b/examples/install.yaml new file mode 100644 index 0000000..1f7fc05 --- /dev/null +++ b/examples/install.yaml @@ -0,0 +1,6 @@ +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-confluent +spec: + package: crossplane-contrib/provider-confluent:v0.1.0 diff --git a/examples/null/resource.yaml b/examples/null/resource.yaml new file mode 100644 index 0000000..fd6956d --- /dev/null +++ b/examples/null/resource.yaml @@ -0,0 +1,10 @@ +apiVersion: null.confluent.upbound.io/v1alpha1 +kind: Resource +metadata: + name: example +spec: + forProvider: + triggers: + example-trigger: example-value + providerConfigRef: + name: default \ No newline at end of file diff --git a/examples/providerconfig/.gitignore b/examples/providerconfig/.gitignore new file mode 100644 index 0000000..4a424df --- /dev/null +++ b/examples/providerconfig/.gitignore @@ -0,0 +1 @@ +secret.yaml diff --git a/examples/providerconfig/providerconfig.yaml b/examples/providerconfig/providerconfig.yaml new file mode 100644 index 0000000..8d424e2 --- /dev/null +++ b/examples/providerconfig/providerconfig.yaml @@ -0,0 +1,11 @@ +apiVersion: confluent.upbound.io/v1beta1 +kind: ProviderConfig +metadata: + name: default +spec: + credentials: + source: Secret + secretRef: + name: example-creds + namespace: crossplane-system + key: credentials diff --git a/examples/providerconfig/secret.yaml.tmpl b/examples/providerconfig/secret.yaml.tmpl new file mode 100644 index 0000000..6dfa5c8 --- /dev/null +++ b/examples/providerconfig/secret.yaml.tmpl @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Secret +metadata: + name: example-creds + namespace: crossplane-system +type: Opaque +stringData: + credentials: | + { + "username": "admin", + "password": "t0ps3cr3t11" + } diff --git a/examples/storeconfig/vault.yaml b/examples/storeconfig/vault.yaml new file mode 100644 index 0000000..9a459c0 --- /dev/null +++ b/examples/storeconfig/vault.yaml @@ -0,0 +1,19 @@ +apiVersion: confluent.upbound.io/v1alpha1 +kind: StoreConfig +metadata: + name: vault +spec: + type: Vault + defaultScope: crossplane-system + vault: + server: http://vault.vault-system:8200 + mountPath: kv2/ + version: v2 + auth: + method: Token + token: + source: Secret + secretRef: + namespace: crossplane-system + name: vault-token + key: token \ No newline at end of file diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..0f0a3c7 --- /dev/null +++ b/go.mod @@ -0,0 +1,143 @@ +module github.com/crossplane-contrib/provider-confluent + +go 1.19 + +require ( + github.com/crossplane/crossplane-runtime v0.20.0-rc.0.0.20230406155702-4e1673b7141f + github.com/crossplane/crossplane-tools v0.0.0-20230327091744-4236bf732aa5 + github.com/pkg/errors v0.9.1 + github.com/upbound/upjet v0.9.0-rc.0.0.20230413124512-01e5c1fafb42 + gopkg.in/alecthomas/kingpin.v2 v2.2.6 + k8s.io/apimachinery v0.26.3 + k8s.io/client-go v0.26.3 + sigs.k8s.io/controller-runtime v0.14.6 + sigs.k8s.io/controller-tools v0.11.3 +) + +require ( + github.com/agext/levenshtein v1.2.3 
// indirect + github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect + github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect + github.com/antchfx/htmlquery v1.2.4 // indirect + github.com/antchfx/xpath v1.2.0 // indirect + github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect + github.com/armon/go-metrics v0.3.9 // indirect + github.com/armon/go-radix v1.0.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cenkalti/backoff/v3 v3.0.0 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/dave/jennifer v1.4.1 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/evanphx/json-patch/v5 v5.6.0 // indirect + github.com/fatih/camelcase v1.0.0 // indirect + github.com/fatih/color v1.13.0 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/zapr v1.2.3 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.20.0 // indirect + github.com/go-openapi/swag v0.21.1 // indirect + github.com/gobuffalo/flect v0.3.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/mock v1.6.0 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/gnostic v0.6.9 // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 // indirect + github.com/hashicorp/go-hclog v1.2.1 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-plugin v1.4.4 // indirect + github.com/hashicorp/go-retryablehttp v0.7.1 // indirect + github.com/hashicorp/go-rootcerts v1.0.2 // indirect + github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 // indirect + github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1 // indirect + github.com/hashicorp/go-secure-stdlib/strutil v0.1.1 // indirect + github.com/hashicorp/go-sockaddr v1.0.2 // indirect + github.com/hashicorp/go-uuid v1.0.3 // indirect + github.com/hashicorp/go-version v1.6.0 // indirect + github.com/hashicorp/golang-lru v0.5.4 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hashicorp/hcl/v2 v2.14.1 // indirect + github.com/hashicorp/logutils v1.0.0 // indirect + github.com/hashicorp/terraform-json v0.14.0 // indirect + github.com/hashicorp/terraform-plugin-go v0.14.0 // indirect + github.com/hashicorp/terraform-plugin-log v0.7.0 // indirect + github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.0 // indirect + github.com/hashicorp/vault/api v1.5.0 // indirect + github.com/hashicorp/vault/sdk v0.4.1 // indirect + github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect + github.com/iancoleman/strcase v0.2.0 // indirect + github.com/imdario/mergo v0.3.12 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.12 // indirect + github.com/mattn/go-isatty 
v0.0.16 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/go-ps v1.0.0 // indirect + github.com/mitchellh/go-testing-interface v1.14.1 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/muvaf/typewriter v0.0.0-20220131201631-921e94e8e8d7 // indirect + github.com/oklog/run v1.0.0 // indirect + github.com/pierrec/lz4 v2.5.2+incompatible // indirect + github.com/prometheus/client_golang v1.14.0 // indirect + github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/common v0.37.0 // indirect + github.com/prometheus/procfs v0.8.0 // indirect + github.com/ryanuber/go-glob v1.0.0 // indirect + github.com/spf13/afero v1.9.2 // indirect + github.com/spf13/cobra v1.6.1 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/tmccombs/hcl2json v0.3.3 // indirect + github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect + github.com/vmihailenco/msgpack/v4 v4.3.12 // indirect + github.com/vmihailenco/tagparser v0.1.1 // indirect + github.com/yuin/goldmark v1.4.13 // indirect + github.com/zclconf/go-cty v1.11.0 // indirect + go.uber.org/atomic v1.10.0 // indirect + go.uber.org/multierr v1.8.0 // indirect + go.uber.org/zap v1.24.0 // indirect + golang.org/x/crypto v0.2.0 // indirect + golang.org/x/mod v0.7.0 // indirect + golang.org/x/net v0.7.0 // indirect + golang.org/x/oauth2 v0.1.0 // indirect + golang.org/x/sys v0.5.0 // indirect + golang.org/x/term v0.5.0 // indirect + golang.org/x/text v0.7.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.4.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd // indirect + google.golang.org/grpc v1.50.1 // indirect + google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/square/go-jose.v2 v2.5.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/api v0.26.3 // indirect + k8s.io/apiextensions-apiserver v0.26.3 // indirect + k8s.io/component-base v0.26.3 // indirect + k8s.io/klog/v2 v2.80.1 // indirect + k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect + k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 // indirect + sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..82ba238 --- /dev/null +++ b/go.sum @@ -0,0 +1,979 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod 
h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= +github.com/agext/levenshtein v1.2.3/go.mod 
h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/alecthomas/kong v0.2.16/go.mod h1:kQOmtJgV+Lb4aj+I2LEn40cbtawdWJ9Y8QLq+lElKxE= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/antchfx/htmlquery v1.2.4 h1:qLteofCMe/KGovBI6SQgmou2QNyedFUW+pE+BpeZ494= +github.com/antchfx/htmlquery v1.2.4/go.mod h1:2xO6iu3EVWs7R2JYqBbp8YzG50gj/ofqs5/0VZoDZLc= +github.com/antchfx/xpath v1.2.0 h1:mbwv7co+x0RwgeGAOHdrKy89GvHaGvxxBtPK0uF9Zr8= +github.com/antchfx/xpath v1.2.0/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= +github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 h1:MzVXffFUye+ZcSR6opIgz9Co7WcDx6ZcY+RjfFHoA0I= +github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= +github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= +github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= +github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= +github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18= +github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= +github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= 
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/crossplane/crossplane-runtime v0.20.0-rc.0.0.20230406155702-4e1673b7141f h1:wDRr6gaoiQstEdddrn0B5SSSgzdXreOQAbdmRH+9JeI= +github.com/crossplane/crossplane-runtime v0.20.0-rc.0.0.20230406155702-4e1673b7141f/go.mod h1:ebtUpmconMy8RKUEhrCXTUFSOpfGQqbKM2E+rjCCYJo= +github.com/crossplane/crossplane-tools v0.0.0-20230327091744-4236bf732aa5 h1:K9H55wcwfXcGroZApIgPmIGRGuZLszsLDCYB12p2yMo= +github.com/crossplane/crossplane-tools v0.0.0-20230327091744-4236bf732aa5/go.mod h1:+e4OaFlOcmr0JvINHl/yvEYBrZawzTgj6pQumOH1SS0= +github.com/dave/jennifer v1.4.1 h1:XyqG6cn5RQsTj3qlWQTKlRGAyrTcsk1kUmWdZBzRjDw= +github.com/dave/jennifer v1.4.1/go.mod h1:7jEdnm+qBcxl8PC0zyp7vxcpSRnzXSt9r39tpTVGlwA= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= +github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod 
h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= +github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= +github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= +github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= +github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk= +github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.0/go.mod 
h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= +github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= +github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.21.1 h1:wm0rhTb5z7qpJRHBdPOMuY4QjVUMbF6/kwoYeRAOrKU= +github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-test/deep v1.0.7 h1:/VSMRlnY/JSyqxQUzQLKVMAskpY/NZKFA5j2P+0pP2M= +github.com/go-test/deep v1.0.7/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8= +github.com/gobuffalo/flect v0.3.0 h1:erfPWM+K1rFNIQeRPdeEXxo8yFr/PO17lhRnS8FUrtk= +github.com/gobuffalo/flect v0.3.0/go.mod h1:5pf3aGnsvqvCj50AVni7mJJF8ICxGZ8HomberC3pXLE= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= 
+github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/addlicense v0.0.0-20210428195630-6d92264d7170/go.mod h1:EMjYTRimagHs1FwlIqKyX3wAM0u3rA+McvlIIWmSamA= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= +github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod 
h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI= +github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.16.2/go.mod 
h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.2.1 h1:YQsLlGDJgwhXFpucSPyVbCBviQtjlHv3jLTlp8YmtEw= +github.com/hashicorp/go-hclog v1.2.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= +github.com/hashicorp/go-plugin v1.4.4 h1:NVdrSdFRt3SkZtNckJ6tog7gbpRrcbOjQi/rgF7JYWQ= +github.com/hashicorp/go-plugin v1.4.4/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ= +github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1 h1:78ki3QBevHwYrVxnyVeaEz+7WtifHhauYF23es/0KlI= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1 h1:nd0HIW15E6FG1MsnArYaHfuw9C2zgzM8LxkG5Ty/788= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= +github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.5.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.6.0 
h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcl/v2 v2.9.1/go.mod h1:FwWsfWEjyV/CMj8s/gqAuiviY72rJ1/oayI9WftqcKg= +github.com/hashicorp/hcl/v2 v2.14.1 h1:x0BpjfZ+CYdbiz+8yZTQ+gdLO7IXvOut7Da+XJayx34= +github.com/hashicorp/hcl/v2 v2.14.1/go.mod h1:e4z5nxYlWNPdDSNYX+ph14EvWYMFm3eP0zIUqPc2jr0= +github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/terraform-json v0.14.0 h1:sh9iZ1Y8IFJLx+xQiKHGud6/TSUCM0N8e17dKDpqV7s= +github.com/hashicorp/terraform-json v0.14.0/go.mod h1:5A9HIWPkk4e5aeeXIBbkcOvaZbIYnAIkEyqP2pNSckM= +github.com/hashicorp/terraform-plugin-go v0.14.0 h1:ttnSlS8bz3ZPYbMb84DpcPhY4F5DsQtcAS7cHo8uvP4= +github.com/hashicorp/terraform-plugin-go v0.14.0/go.mod h1:2nNCBeRLaenyQEi78xrGrs9hMbulveqG/zDMQSvVJTE= +github.com/hashicorp/terraform-plugin-log v0.7.0 h1:SDxJUyT8TwN4l5b5/VkiTIaQgY6R+Y2BQ0sRZftGKQs= +github.com/hashicorp/terraform-plugin-log v0.7.0/go.mod h1:p4R1jWBXRTvL4odmEkFfDdhUjHf9zcs/BCoNHAc7IK4= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.0 h1:FtCLTiTcykdsURXPt/ku7fYXm3y19nbzbZcUxHx9RbI= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.0/go.mod h1:80wf5oad1tW+oLnbXS4UTYmDCrl7BuN1Q+IA91X1a4Y= +github.com/hashicorp/vault/api v1.5.0 h1:Bp6yc2bn7CWkOrVIzFT/Qurzx528bdavF3nz590eu28= +github.com/hashicorp/vault/api v1.5.0/go.mod h1:LkMdrZnWNrFaQyYYazWVn7KshilfDidgVBq6YiTq/bM= +github.com/hashicorp/vault/sdk v0.4.1 h1:3SaHOJY687jY1fnB61PtL0cOkKItphrbLmux7T92HBo= +github.com/hashicorp/vault/sdk v0.4.1/go.mod h1:aZ3fNuL5VNydQk8GcLJ2TV8YCRVvyaakYkhZRoVuhj0= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/iancoleman/strcase v0.2.0 h1:05I4QRnGpI0m37iZQRuskXh+w77mr6Z41lwQzuHLwW0= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= +github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= 
+github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= 
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc= +github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 
h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/muvaf/typewriter v0.0.0-20220131201631-921e94e8e8d7 h1:CxRHKnh1YJXgNKxcos9rrKL6AcmOl1AS/fygmxFDzh4= +github.com/muvaf/typewriter v0.0.0-20220131201631-921e94e8e8d7/go.mod h1:SAAdeMEiFXR8LcHffvIdiLI1w243DCH2DuHq7UrA5YQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce h1:RPclfga2SEJmgMmz2k+Mg7cowZ8yv4Trqw9UsJby758= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo/v2 v2.6.0 h1:9t9b9vRUbFq3C4qKFCGkVuq/fIHji802N1nrtkh1mNc= +github.com/onsi/gomega v1.24.2 h1:J/tulyYK6JwBldPViHJReihxxZ+22FHs0piGjQAvoUE= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= +github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod 
h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= +github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sebdah/goldie v1.0.0/go.mod h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdkkZBH4= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= 
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= +github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/tmccombs/hcl2json v0.3.3 h1:+DLNYqpWE0CsOQiEZu+OZm5ZBImake3wtITYxQ8uLFQ= +github.com/tmccombs/hcl2json v0.3.3/go.mod h1:Y2chtz2x9bAeRTvSibVRVgbLJhLJXKlUeIvjeVdnm4w= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/upbound/upjet v0.9.0-rc.0.0.20230413124512-01e5c1fafb42 h1:kEpQ3I4YXkdatz/dT8LvvFUHbJTPw4SBz4SdFmPS/vw= +github.com/upbound/upjet v0.9.0-rc.0.0.20230413124512-01e5c1fafb42/go.mod h1:IhrO+1L/Ieq7ZCORhiuZ4sShOzl/GoVGVjq0+JCMDYo= +github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= +github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/vmihailenco/msgpack/v4 v4.3.12 h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvCazn8G65U= +github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= +github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY= +github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod 
h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= +github.com/zclconf/go-cty v1.8.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= +github.com/zclconf/go-cty v1.8.1/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= +github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= +github.com/zclconf/go-cty v1.11.0 h1:726SxLdi2SDnjY+BStqB9J1hNp4+2WlzyXLuimibIe0= +github.com/zclconf/go-cty v1.11.0/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA= +github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= +go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto 
v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.2.0 h1:BRXPfhNivWL5Yq0BGQ39a2sW6t44aODpfxkWjYdzewE= +golang.org/x/crypto v0.2.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod 
v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net 
v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.1.0 h1:isLCZuhj4v+tYv7eskaN4v/TM+A1begWWgyVJDdl1+Y= +golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.4.0 h1:7mTAgkunk3fr4GAloyyCasadO6h9zSsQZbwvcaIciV4= +golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= +gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= 
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= 
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd h1:OjndDrsik+Gt+e6fs45z9AxiewiKyLKYpA45W5Kpkks= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY= 
+google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8 h1:KR8+MyP7/qOlV+8Af01LtjL04bu7on42eVsxT4EyBQk= +google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod 
h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.26.3 h1:emf74GIQMTik01Aum9dPP0gAypL8JTLl/lHa4V9RFSU= +k8s.io/api v0.26.3/go.mod h1:PXsqwPMXBSBcL1lJ9CYDKy7kIReUydukS5JiRlxC3qE= +k8s.io/apiextensions-apiserver v0.26.3 h1:5PGMm3oEzdB1W/FTMgGIDmm100vn7IaUP5er36dB+YE= +k8s.io/apiextensions-apiserver v0.26.3/go.mod h1:jdA5MdjNWGP+njw1EKMZc64xAT5fIhN6VJrElV3sfpQ= +k8s.io/apimachinery v0.26.3 h1:dQx6PNETJ7nODU3XPtrwkfuubs6w7sX0M8n61zHIV/k= +k8s.io/apimachinery v0.26.3/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= +k8s.io/client-go v0.26.3 h1:k1UY+KXfkxV2ScEL3gilKcF7761xkYsSD6BC9szIu8s= +k8s.io/client-go v0.26.3/go.mod h1:ZPNu9lm8/dbRIPAgteN30RSXea6vrCpFvq+MateTUuQ= +k8s.io/component-base v0.26.3 h1:oC0WMK/ggcbGDTkdcqefI4wIZRYdK3JySx9/HADpV0g= +k8s.io/component-base v0.26.3/go.mod h1:5kj1kZYwSC6ZstHJN7oHBqcJC6yyn41eR+Sqa/mQc8E= +k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= +k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= +k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= +k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y= +k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/controller-runtime v0.14.6 h1:oxstGVvXGNnMvY7TAESYk+lzr6S3V5VFxQ6d92KcwQA= +sigs.k8s.io/controller-runtime v0.14.6/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0= +sigs.k8s.io/controller-tools v0.11.3 h1:T1xzLkog9saiyQSLz1XOImu4OcbdXWytc5cmYsBeBiE= +sigs.k8s.io/controller-tools v0.11.3/go.mod h1:qcfX7jfcfYD/b7lAhvqAyTbt/px4GpvN88WKLFFv7p8= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= +sigs.k8s.io/structured-merge-diff/v4 
v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt new file mode 100644 index 0000000..7a5ad45 --- /dev/null +++ b/hack/boilerplate.go.txt @@ -0,0 +1,3 @@ +/* +Copyright 2022 Upbound Inc. +*/ \ No newline at end of file diff --git a/hack/prepare.sh b/hack/prepare.sh new file mode 100755 index 0000000..c7a649b --- /dev/null +++ b/hack/prepare.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash +set -euox pipefail + +read -r -p "Lower case provider name (ex. github): " PROVIDER_NAME_LOWER +read -r -p "Normal case provider name (ex. GitHub): " PROVIDER_NAME_NORMAL +read -r -p "Organization (ex. upbound, my-org-name): " ORGANIZATION_NAME +read -r -p "CRD rootGroup (ex. upbound.io, crossplane.io): " CRD_ROOT_GROUP + +REPLACE_FILES='./* ./.github :!build/** :!go.* :!hack/prepare.sh' +# shellcheck disable=SC2086 +git grep -l 'template' -- ${REPLACE_FILES} | xargs sed -i.bak "s/upjet-provider-template/provider-${PROVIDER_NAME_LOWER}/g" +# shellcheck disable=SC2086 +git grep -l 'template' -- ${REPLACE_FILES} | xargs sed -i.bak "s/template/${PROVIDER_NAME_LOWER}/g" +# shellcheck disable=SC2086 +git grep -l "upbound/provider-${PROVIDER_NAME_LOWER}" -- ${REPLACE_FILES} | xargs sed -i.bak "s|upbound/provider-${PROVIDER_NAME_LOWER}|${ORGANIZATION_NAME}/provider-${PROVIDER_NAME_LOWER}|g" +# shellcheck disable=SC2086 +git grep -l 'Template' -- ${REPLACE_FILES} | xargs sed -i.bak "s/Template/${PROVIDER_NAME_NORMAL}/g" +# shellcheck disable=SC2086 +git grep -l "upbound.io" -- "apis/v1*" | xargs sed -i.bak "s|upbound.io|${CRD_ROOT_GROUP}|g" +# shellcheck disable=SC2086 +git grep -l "ujconfig\.WithRootGroup(\"${PROVIDER_NAME_LOWER}.upbound\.io\")" -- "config/provider.go" | xargs sed -i.bak "s|ujconfig.WithRootGroup(\"${PROVIDER_NAME_LOWER}.upbound.io\")|ujconfig.WithRootGroup(\"${CRD_ROOT_GROUP}\")|g" + +# We need to be careful while replacing the "template" keyword in go.mod as it could tamper +# with some imported packages under the require section. +sed -i.bak "s|upbound/upjet-provider-template|${ORGANIZATION_NAME}/provider-${PROVIDER_NAME_LOWER}|g" go.mod +sed -i.bak "s|PROJECT_REPO ?= github.com/upbound/|PROJECT_REPO ?= github.com/${ORGANIZATION_NAME}/|g" Makefile +sed -i.bak "s/\[YEAR\]/$(date +%Y)/g" LICENSE + +# Clean up the .bak files created by sed +git clean -fd + +git mv "internal/clients/template.go" "internal/clients/${PROVIDER_NAME_LOWER}.go" +git mv "cluster/images/upjet-provider-template" "cluster/images/provider-${PROVIDER_NAME_LOWER}" + +# We need to remove this api folder otherwise the first `make generate` fails with +# the following error, probably due to some optimizations in go generate with v1.17: +# generate: open /Users/hasanturken/Workspace/crossplane-contrib/upjet-provider-template/apis/null/v1alpha1/zz_generated.deepcopy.go: no such file or directory +rm -rf apis/null +# Remove the sample directory which was a configuration in the template +rm -rf config/null \ No newline at end of file diff --git a/internal/clients/confluent.go b/internal/clients/confluent.go new file mode 100644 index 0000000..909862e --- /dev/null +++ b/internal/clients/confluent.go @@ -0,0 +1,74 @@ +/* +Copyright 2021 Upbound Inc.
+*/ + +package clients + +import ( + "context" + "encoding/json" + + "github.com/crossplane-contrib/provider-confluent/apis/v1beta1" + "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/pkg/errors" + "github.com/upbound/upjet/pkg/terraform" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + // ProviderConfig secret keys + cloudAPIKey = "cloud_api_key" + cloudAPISecret = "cloud_api_secret" + + // error messages + errNoProviderConfig = "no providerConfigRef provided" + errGetProviderConfig = "cannot get referenced ProviderConfig" + errTrackUsage = "cannot track ProviderConfig usage" + errExtractCredentials = "cannot extract credentials" + errUnmarshalCredentials = "cannot unmarshal confluent credentials as JSON" +) + +// TerraformSetupBuilder builds a terraform.SetupFn function which +// returns a Terraform provider setup configuration. +func TerraformSetupBuilder(version, providerSource, providerVersion string) terraform.SetupFn { + return func(ctx context.Context, client client.Client, mg resource.Managed) (terraform.Setup, error) { + ps := terraform.Setup{ + Version: version, + Requirement: terraform.ProviderRequirement{ + Source: providerSource, + Version: providerVersion, + }, + } + + configRef := mg.GetProviderConfigReference() + if configRef == nil { + return ps, errors.New(errNoProviderConfig) + } + pc := &v1beta1.ProviderConfig{} + if err := client.Get(ctx, types.NamespacedName{Name: configRef.Name}, pc); err != nil { + return ps, errors.Wrap(err, errGetProviderConfig) + } + + t := resource.NewProviderConfigUsageTracker(client, &v1beta1.ProviderConfigUsage{}) + if err := t.Track(ctx, mg); err != nil { + return ps, errors.Wrap(err, errTrackUsage) + } + + data, err := resource.CommonCredentialExtractor(ctx, pc.Spec.Credentials.Source, client, pc.Spec.Credentials.CommonCredentialSelectors) + if err != nil { + return ps, errors.Wrap(err, errExtractCredentials) + } + creds := map[string]string{} + if err := json.Unmarshal(data, &creds); err != nil { + return ps, errors.Wrap(err, errUnmarshalCredentials) + } + + // Set credentials in Terraform provider configuration. + ps.Configuration = map[string]any{ + cloudAPIKey: creds[cloudAPIKey], + cloudAPISecret: creds[cloudAPISecret], + } + return ps, nil + } +} diff --git a/internal/controller/api/key/zz_controller.go b/internal/controller/api/key/zz_controller.go new file mode 100755 index 0000000..36a33dc --- /dev/null +++ b/internal/controller/api/key/zz_controller.go @@ -0,0 +1,55 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package key + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/upbound/upjet/pkg/controller" + "github.com/upbound/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/crossplane-contrib/provider-confluent/apis/api/v1alpha1" + features "github.com/crossplane-contrib/provider-confluent/internal/features" +) + +// Setup adds a controller that reconciles Key managed resources.
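+//
+// How the reconciler gets its Terraform credentials: the terraform.SetupFn
+// built by TerraformSetupBuilder in internal/clients/confluent.go above
+// resolves the resource's providerConfigRef, extracts the referenced
+// credentials, and unmarshals them as JSON. A minimal sketch of that
+// credentials payload, with placeholder values (only the cloud_api_key and
+// cloud_api_secret keys are read):
+//
+//	{
+//	  "cloud_api_key": "<CLOUD_API_KEY>",
+//	  "cloud_api_secret": "<CLOUD_API_SECRET>"
+//	}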
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Key_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK)) + } + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["confluent_api_key"], tjcontroller.WithLogger(o.Logger), + tjcontroller.WithCallbackProvider(tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Key_GroupVersionKind))), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.Features.Enabled(features.EnableAlphaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Key_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + For(&v1alpha1.Key{}). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/confluent/environment/zz_controller.go b/internal/controller/confluent/environment/zz_controller.go new file mode 100755 index 0000000..a9b2151 --- /dev/null +++ b/internal/controller/confluent/environment/zz_controller.go @@ -0,0 +1,55 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package environment + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/upbound/upjet/pkg/controller" + "github.com/upbound/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/crossplane-contrib/provider-confluent/apis/confluent/v1alpha1" + features "github.com/crossplane-contrib/provider-confluent/internal/features" +) + +// Setup adds a controller that reconciles Environment managed resources. 
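+//
+// Every generated controller in this change follows the same skeleton; only
+// the kind and the Terraform resource name used to index
+// o.Provider.Resources (here "confluent_environment") vary. A minimal
+// sketch of the tjcontroller.Options a main.go would construct, assuming
+// upjet's exported field names, the template's config.GetProvider helper,
+// and hypothetical log/version values (xpcontroller aliases
+// crossplane-runtime's pkg/controller):
+//
+//	o := tjcontroller.Options{
+//		Options:        xpcontroller.Options{Logger: log, PollInterval: 1 * time.Minute},
+//		Provider:       config.GetProvider(),
+//		WorkspaceStore: terraform.NewWorkspaceStore(log),
+//		SetupFn:        clients.TerraformSetupBuilder(version, providerSource, providerVersion),
+//	}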
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Environment_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK)) + } + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["confluent_environment"], tjcontroller.WithLogger(o.Logger), + tjcontroller.WithCallbackProvider(tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Environment_GroupVersionKind))), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.Features.Enabled(features.EnableAlphaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Environment_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + For(&v1alpha1.Environment{}). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/doc.go b/internal/controller/doc.go new file mode 100644 index 0000000..a4c8097 --- /dev/null +++ b/internal/controller/doc.go @@ -0,0 +1,5 @@ +/* +Copyright 2021 Upbound Inc. +*/ + +package controller diff --git a/internal/controller/kafka/acl/zz_controller.go b/internal/controller/kafka/acl/zz_controller.go new file mode 100755 index 0000000..506bf60 --- /dev/null +++ b/internal/controller/kafka/acl/zz_controller.go @@ -0,0 +1,55 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package acl + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/upbound/upjet/pkg/controller" + "github.com/upbound/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/crossplane-contrib/provider-confluent/apis/kafka/v1alpha1" + features "github.com/crossplane-contrib/provider-confluent/internal/features" +) + +// Setup adds a controller that reconciles ACL managed resources. 
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.ACL_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK)) + } + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["confluent_kafka_acl"], tjcontroller.WithLogger(o.Logger), + tjcontroller.WithCallbackProvider(tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ACL_GroupVersionKind))), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.Features.Enabled(features.EnableAlphaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ACL_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + For(&v1alpha1.ACL{}). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/kafka/cluster/zz_controller.go b/internal/controller/kafka/cluster/zz_controller.go new file mode 100755 index 0000000..7b460b1 --- /dev/null +++ b/internal/controller/kafka/cluster/zz_controller.go @@ -0,0 +1,55 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package cluster + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/upbound/upjet/pkg/controller" + "github.com/upbound/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/crossplane-contrib/provider-confluent/apis/kafka/v1alpha1" + features "github.com/crossplane-contrib/provider-confluent/internal/features" +) + +// Setup adds a controller that reconciles Cluster managed resources. 
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Cluster_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK)) + } + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["confluent_kafka_cluster"], tjcontroller.WithLogger(o.Logger), + tjcontroller.WithCallbackProvider(tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Cluster_GroupVersionKind))), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.Features.Enabled(features.EnableAlphaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Cluster_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + For(&v1alpha1.Cluster{}). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/kafka/clusterconfig/zz_controller.go b/internal/controller/kafka/clusterconfig/zz_controller.go new file mode 100755 index 0000000..5c6de6c --- /dev/null +++ b/internal/controller/kafka/clusterconfig/zz_controller.go @@ -0,0 +1,55 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package clusterconfig + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/upbound/upjet/pkg/controller" + "github.com/upbound/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/crossplane-contrib/provider-confluent/apis/kafka/v1alpha1" + features "github.com/crossplane-contrib/provider-confluent/internal/features" +) + +// Setup adds a controller that reconciles ClusterConfig managed resources. 
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.ClusterConfig_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK)) + } + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["confluent_kafka_cluster_config"], tjcontroller.WithLogger(o.Logger), + tjcontroller.WithCallbackProvider(tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ClusterConfig_GroupVersionKind))), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.Features.Enabled(features.EnableAlphaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ClusterConfig_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + For(&v1alpha1.ClusterConfig{}). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/providerconfig/config.go b/internal/controller/providerconfig/config.go new file mode 100644 index 0000000..5973384 --- /dev/null +++ b/internal/controller/providerconfig/config.go @@ -0,0 +1,37 @@ +/* +Copyright 2021 Upbound Inc. +*/ + +package providerconfig + +import ( + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/source" + + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/providerconfig" + "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/upbound/upjet/pkg/controller" + + "github.com/crossplane-contrib/provider-confluent/apis/v1beta1" +) + +// Setup adds a controller that reconciles ProviderConfigs by accounting for +// their current usage. +func Setup(mgr ctrl.Manager, o controller.Options) error { + name := providerconfig.ControllerName(v1beta1.ProviderConfigGroupKind) + + of := resource.ProviderConfigKinds{ + Config: v1beta1.ProviderConfigGroupVersionKind, + UsageList: v1beta1.ProviderConfigUsageListGroupVersionKind, + } + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + For(&v1beta1.ProviderConfig{}). + Watches(&source.Kind{Type: &v1beta1.ProviderConfigUsage{}}, &resource.EnqueueRequestForProviderConfig{}). + Complete(providerconfig.NewReconciler(mgr, of, + providerconfig.WithLogger(o.Logger.WithValues("controller", name)), + providerconfig.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))))) +} diff --git a/internal/controller/role/binding/zz_controller.go b/internal/controller/role/binding/zz_controller.go new file mode 100755 index 0000000..2b5e590 --- /dev/null +++ b/internal/controller/role/binding/zz_controller.go @@ -0,0 +1,55 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. 
DO NOT EDIT. + +package binding + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/upbound/upjet/pkg/controller" + "github.com/upbound/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/crossplane-contrib/provider-confluent/apis/role/v1alpha1" + features "github.com/crossplane-contrib/provider-confluent/internal/features" +) + +// Setup adds a controller that reconciles Binding managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Binding_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK)) + } + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["confluent_role_binding"], tjcontroller.WithLogger(o.Logger), + tjcontroller.WithCallbackProvider(tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Binding_GroupVersionKind))), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.Features.Enabled(features.EnableAlphaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Binding_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + For(&v1alpha1.Binding{}). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/service/account/zz_controller.go b/internal/controller/service/account/zz_controller.go new file mode 100755 index 0000000..570e134 --- /dev/null +++ b/internal/controller/service/account/zz_controller.go @@ -0,0 +1,55 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package account + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/upbound/upjet/pkg/controller" + "github.com/upbound/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/crossplane-contrib/provider-confluent/apis/service/v1alpha1" + features "github.com/crossplane-contrib/provider-confluent/internal/features" +) + +// Setup adds a controller that reconciles Account managed resources. 
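+//
+// Connection details from the generated controllers are always published as
+// Kubernetes Secrets via managed.NewAPISecretPublisher; a Secret-store
+// details manager is appended only when o.SecretStoreConfigGVK is set. A
+// sketch of the wiring a main.go would typically do, assuming the
+// template's StoreConfig API (that wiring is not part of this change):
+//
+//	if o.Features.Enabled(features.EnableAlphaExternalSecretStores) {
+//		o.SecretStoreConfigGVK = &v1alpha1.StoreConfigGroupVersionKind
+//	}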
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Account_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK)) + } + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["confluent_service_account"], tjcontroller.WithLogger(o.Logger), + tjcontroller.WithCallbackProvider(tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Account_GroupVersionKind))), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.Features.Enabled(features.EnableAlphaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Account_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + For(&v1alpha1.Account{}). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/zz_setup.go b/internal/controller/zz_setup.go new file mode 100755 index 0000000..5785a65 --- /dev/null +++ b/internal/controller/zz_setup.go @@ -0,0 +1,40 @@ +/* +Copyright 2021 Upbound Inc. +*/ + +package controller + +import ( + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/upbound/upjet/pkg/controller" + + key "github.com/crossplane-contrib/provider-confluent/internal/controller/api/key" + environment "github.com/crossplane-contrib/provider-confluent/internal/controller/confluent/environment" + acl "github.com/crossplane-contrib/provider-confluent/internal/controller/kafka/acl" + cluster "github.com/crossplane-contrib/provider-confluent/internal/controller/kafka/cluster" + clusterconfig "github.com/crossplane-contrib/provider-confluent/internal/controller/kafka/clusterconfig" + providerconfig "github.com/crossplane-contrib/provider-confluent/internal/controller/providerconfig" + binding "github.com/crossplane-contrib/provider-confluent/internal/controller/role/binding" + account "github.com/crossplane-contrib/provider-confluent/internal/controller/service/account" +) + +// Setup creates all controllers with the supplied logger and adds them to +// the supplied manager. 
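+//
+// Each entry in the slice below has the signature
+//
+//	func Setup(mgr ctrl.Manager, o controller.Options) error
+//
+// and is invoked in order; the first error is returned to the caller.
+// Newly generated resource controllers are registered by upjet appending
+// their Setup functions to this slice.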
+func Setup(mgr ctrl.Manager, o controller.Options) error { + for _, setup := range []func(ctrl.Manager, controller.Options) error{ + key.Setup, + environment.Setup, + acl.Setup, + cluster.Setup, + clusterconfig.Setup, + providerconfig.Setup, + binding.Setup, + account.Setup, + } { + if err := setup(mgr, o); err != nil { + return err + } + } + return nil +} diff --git a/internal/features/features.go b/internal/features/features.go new file mode 100644 index 0000000..1b37951 --- /dev/null +++ b/internal/features/features.go @@ -0,0 +1,20 @@ +/* + Copyright 2022 Upbound Inc +*/ + +package features + +import "github.com/crossplane/crossplane-runtime/pkg/feature" + +// Feature flags. +const ( + // EnableAlphaExternalSecretStores enables alpha support for + // External Secret Stores. See the below design for more details. + // https://github.com/crossplane/crossplane/blob/390ddd/design/design-doc-external-secret-stores.md + EnableAlphaExternalSecretStores feature.Flag = "EnableAlphaExternalSecretStores" + + // EnableAlphaManagementPolicies enables alpha support for + // Management Policies. See the below design for more details. + // https://github.com/crossplane/crossplane/pull/3531 + EnableAlphaManagementPolicies feature.Flag = "EnableAlphaManagementPolicies" +) diff --git a/package/crds/api.crossplane.io_keys.yaml b/package/crds/api.crossplane.io_keys.yaml new file mode 100644 index 0000000..890a2c5 --- /dev/null +++ b/package/crds/api.crossplane.io_keys.yaml @@ -0,0 +1,421 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.3 + creationTimestamp: null + name: keys.api.crossplane.io +spec: + group: api.crossplane.io + names: + categories: + - crossplane + - managed + - confluent + kind: Key + listKind: KeyList + plural: keys + singular: key + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Key is the Schema for the Keys API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: KeySpec defines the desired state of Key + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicy field in a future release. 
Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: A free-form description of the API key. + type: string + disableWaitForReady: + description: Defaults to `false`. + type: boolean + displayName: + description: A human-readable name for the API key. + type: string + managedResource: + description: The resource associated with this object. The only + resource that is supported is 'cmk.v2.Cluster', 'srcm.v2.Cluster'. + items: + properties: + apiVersion: + description: The API version of the referred owner. + type: string + environment: + description: Environment objects represent an isolated namespace + for your Confluent resources for organizational purposes. + items: + properties: + id: + description: The unique identifier for the environment. + type: string + required: + - id + type: object + type: array + id: + description: The unique identifier for the referred resource. + type: string + kind: + description: The kind of the referred resource. + type: string + required: + - apiVersion + - environment + - id + - kind + type: object + type: array + owner: + description: The owner to which the API Key belongs. The owner + can be one of 'iam.v2.User', 'iam.v2.ServiceAccount'. + items: + properties: + apiVersion: + description: The API version of the referred owner. + type: string + id: + description: The unique identifier for the referred owner. + type: string + kind: + description: The kind of the referred owner. + type: string + required: + - apiVersion + - id + - kind + type: object + type: array + type: object + managementPolicy: + default: FullControl + description: 'THIS IS AN ALPHA FIELD. Do not use it in production. + It is not honored unless the relevant Crossplane feature flag is + enabled, and may be changed or removed without notice. ManagementPolicy + specifies the level of control Crossplane has over the managed external + resource. This field is planned to replace the DeletionPolicy field + in a future release. Currently, both could be set independently + and non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - FullControl + - ObserveOnly + - OrphanOnDelete + type: string + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. 
The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + providerRef: + description: 'ProviderReference specifies the provider that will be + used to create, observe, update, and delete this managed resource. + Deprecated: Please use ProviderConfigReference, i.e. `providerConfigRef`' + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. 
+ type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: owner is a required parameter + rule: self.managementPolicy == 'ObserveOnly' || has(self.forProvider.owner) + status: + description: KeyStatus defines the observed state of Key. + properties: + atProvider: + properties: + description: + description: A free-form description of the API key. + type: string + disableWaitForReady: + description: Defaults to `false`. + type: boolean + displayName: + description: A human-readable name for the API key. + type: string + id: + type: string + managedResource: + description: The resource associated with this object. The only + resource that is supported is 'cmk.v2.Cluster', 'srcm.v2.Cluster'. + items: + properties: + apiVersion: + description: The API version of the referred owner. + type: string + environment: + description: Environment objects represent an isolated namespace + for your Confluent resources for organizational purposes. + items: + properties: + id: + description: The unique identifier for the environment. + type: string + type: object + type: array + id: + description: The unique identifier for the referred resource. + type: string + kind: + description: The kind of the referred resource. + type: string + type: object + type: array + owner: + description: The owner to which the API Key belongs. The owner + can be one of 'iam.v2.User', 'iam.v2.ServiceAccount'. + items: + properties: + apiVersion: + description: The API version of the referred owner. + type: string + id: + description: The unique identifier for the referred owner. + type: string + kind: + description: The kind of the referred owner. + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. 
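# Example (editor's sketch, not generated content): a minimal Key manifest
# that satisfies the schema above. All names, IDs, and the owner/managedResource
# apiVersion and kind strings are hypothetical placeholders; consult the
# provider documentation for the exact values your account requires.
apiVersion: api.crossplane.io/v1alpha1
kind: Key
metadata:
  name: example-api-key
spec:
  forProvider:
    displayName: example-api-key
    description: API key for the example cluster
    owner:
      - apiVersion: iam/v2        # assumed; owner kind is one of iam.v2.User or iam.v2.ServiceAccount
        kind: ServiceAccount
        id: sa-abc123
    managedResource:
      - apiVersion: cmk/v2        # assumed; the supported resources are cmk.v2.Cluster and srcm.v2.Cluster
        kind: Cluster
        id: lkc-12345
        environment:
          - id: env-12345
  writeConnectionSecretToRef:
    name: example-api-key-conn
    namespace: crossplane-system
  providerConfigRef:
    name: default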
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/confluent.crossplane.io_environments.yaml b/package/crds/confluent.crossplane.io_environments.yaml new file mode 100644 index 0000000..a0b7b11 --- /dev/null +++ b/package/crds/confluent.crossplane.io_environments.yaml @@ -0,0 +1,317 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.3 + creationTimestamp: null + name: environments.confluent.crossplane.io +spec: + group: confluent.crossplane.io + names: + categories: + - crossplane + - managed + - confluent + kind: Environment + listKind: EnvironmentList + plural: environments + singular: environment + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Environment is the Schema for the Environments API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: EnvironmentSpec defines the desired state of Environment + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicy field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + displayName: + description: A human-readable name for the Environment. + type: string + type: object + managementPolicy: + default: FullControl + description: 'THIS IS AN ALPHA FIELD. Do not use it in production. + It is not honored unless the relevant Crossplane feature flag is + enabled, and may be changed or removed without notice. ManagementPolicy + specifies the level of control Crossplane has over the managed external + resource. This field is planned to replace the DeletionPolicy field + in a future release. 
Currently, both could be set independently + and non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - FullControl + - ObserveOnly + - OrphanOnDelete + type: string + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + providerRef: + description: 'ProviderReference specifies the provider that will be + used to create, observe, update, and delete this managed resource. + Deprecated: Please use ProviderConfigReference, i.e. `providerConfigRef`' + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 
'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: displayName is a required parameter + rule: self.managementPolicy == 'ObserveOnly' || has(self.forProvider.displayName) + status: + description: EnvironmentStatus defines the observed state of Environment. + properties: + atProvider: + properties: + displayName: + description: A human-readable name for the Environment. + type: string + id: + type: string + resourceName: + description: The Confluent Resource Name of the Environment. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. 
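# Example (editor's sketch, not generated content): the smallest valid
# Environment manifest per the schema above. Only displayName is required by
# the CEL validation unless managementPolicy is ObserveOnly; the name shown
# is a hypothetical placeholder.
apiVersion: confluent.crossplane.io/v1alpha1
kind: Environment
metadata:
  name: example-environment
spec:
  forProvider:
    displayName: example-environment
  providerConfigRef:
    name: default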
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/confluent.crossplane.io_providerconfigs.yaml b/package/crds/confluent.crossplane.io_providerconfigs.yaml new file mode 100644 index 0000000..a207705 --- /dev/null +++ b/package/crds/confluent.crossplane.io_providerconfigs.yaml @@ -0,0 +1,153 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.3 + creationTimestamp: null + name: providerconfigs.confluent.crossplane.io +spec: + group: confluent.crossplane.io + names: + categories: + - crossplane + - provider + - confluent + kind: ProviderConfig + listKind: ProviderConfigList + plural: providerconfigs + singular: providerconfig + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + - jsonPath: .spec.credentials.secretRef.name + name: SECRET-NAME + priority: 1 + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: A ProviderConfig configures a Confluent provider. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: A ProviderConfigSpec defines the desired state of a ProviderConfig. + properties: + credentials: + description: Credentials required to authenticate to this provider. + properties: + env: + description: Env is a reference to an environment variable that + contains credentials that must be used to connect to the provider. + properties: + name: + description: Name is the name of an environment variable. + type: string + required: + - name + type: object + fs: + description: Fs is a reference to a filesystem location that contains + credentials that must be used to connect to the provider. + properties: + path: + description: Path is a filesystem path. + type: string + required: + - path + type: object + secretRef: + description: A SecretRef is a reference to a secret key that contains + the credentials that must be used to connect to the provider. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + source: + description: Source of the provider credentials. + enum: + - None + - Secret + - InjectedIdentity + - Environment + - Filesystem + type: string + required: + - source + type: object + required: + - credentials + type: object + status: + description: A ProviderConfigStatus reflects the observed state of a ProviderConfig. + properties: + conditions: + description: Conditions of the resource. 
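# Example (editor's sketch, not generated content): a ProviderConfig using the
# Secret credentials source defined above. The secret name and key are
# hypothetical placeholders.
apiVersion: confluent.crossplane.io/v1beta1
kind: ProviderConfig
metadata:
  name: default
spec:
  credentials:
    source: Secret
    secretRef:
      namespace: crossplane-system
      name: confluent-creds       # hypothetical Secret holding provider credentials
      key: credentials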
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + users: + description: Users of this provider configuration. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/confluent.crossplane.io_providerconfigusages.yaml b/package/crds/confluent.crossplane.io_providerconfigusages.yaml new file mode 100644 index 0000000..19160fc --- /dev/null +++ b/package/crds/confluent.crossplane.io_providerconfigusages.yaml @@ -0,0 +1,111 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.3 + creationTimestamp: null + name: providerconfigusages.confluent.crossplane.io +spec: + group: confluent.crossplane.io + names: + categories: + - crossplane + - provider + - confluent + kind: ProviderConfigUsage + listKind: ProviderConfigUsageList + plural: providerconfigusages + singular: providerconfigusage + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + - jsonPath: .providerConfigRef.name + name: CONFIG-NAME + type: string + - jsonPath: .resourceRef.kind + name: RESOURCE-KIND + type: string + - jsonPath: .resourceRef.name + name: RESOURCE-NAME + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: A ProviderConfigUsage indicates that a resource is using a ProviderConfig. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + providerConfigRef: + description: ProviderConfigReference to the provider config being used. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this reference + is required. The default is 'Required', which means the reconcile + will fail if the reference cannot be resolved. 'Optional' means + this reference will be a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should be resolved. + The default is 'IfNotPresent', which will attempt to resolve + the reference only when the corresponding field is not present. + Use 'Always' to resolve the reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceRef: + description: ResourceReference to the managed resource using the provider + config. + properties: + apiVersion: + description: APIVersion of the referenced object. + type: string + kind: + description: Kind of the referenced object. + type: string + name: + description: Name of the referenced object. + type: string + uid: + description: UID of the referenced object. + type: string + required: + - apiVersion + - kind + - name + type: object + required: + - providerConfigRef + - resourceRef + type: object + served: true + storage: true + subresources: {} diff --git a/package/crds/confluent.crossplane.io_storeconfigs.yaml b/package/crds/confluent.crossplane.io_storeconfigs.yaml new file mode 100644 index 0000000..b49e202 --- /dev/null +++ b/package/crds/confluent.crossplane.io_storeconfigs.yaml @@ -0,0 +1,343 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.3 + creationTimestamp: null + name: storeconfigs.confluent.crossplane.io +spec: + group: confluent.crossplane.io + names: + categories: + - crossplane + - store + - gcp + kind: StoreConfig + listKind: StoreConfigList + plural: storeconfigs + singular: storeconfig + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + - jsonPath: .spec.type + name: TYPE + type: string + - jsonPath: .spec.defaultScope + name: DEFAULT-SCOPE + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: A StoreConfig configures how GCP controller should store connection + details. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: A StoreConfigSpec defines the desired state of a ProviderConfig. + properties: + defaultScope: + description: DefaultScope used for scoping secrets for "cluster-scoped" + resources. If store type is "Kubernetes", this would mean the default + namespace to store connection secrets for cluster scoped resources. + In case of "Vault", this would be used as the default parent path. + Typically, should be set as Crossplane installation namespace. + type: string + kubernetes: + description: Kubernetes configures a Kubernetes secret store. If the + "type" is "Kubernetes" but no config provided, in cluster config + will be used. + properties: + auth: + description: Credentials used to connect to the Kubernetes API. 
+ properties: + env: + description: Env is a reference to an environment variable + that contains credentials that must be used to connect to + the provider. + properties: + name: + description: Name is the name of an environment variable. + type: string + required: + - name + type: object + fs: + description: Fs is a reference to a filesystem location that + contains credentials that must be used to connect to the + provider. + properties: + path: + description: Path is a filesystem path. + type: string + required: + - path + type: object + secretRef: + description: A SecretRef is a reference to a secret key that + contains the credentials that must be used to connect to + the provider. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + source: + description: Source of the credentials. + enum: + - None + - Secret + - Environment + - Filesystem + type: string + required: + - source + type: object + required: + - auth + type: object + plugin: + description: Plugin configures External secret store as a plugin. + properties: + configRef: + description: ConfigRef contains store config reference info. + properties: + apiVersion: + description: APIVersion of the referenced config. + type: string + kind: + description: Kind of the referenced config. + type: string + name: + description: Name of the referenced config. + type: string + required: + - apiVersion + - kind + - name + type: object + endpoint: + description: Endpoint is the endpoint of the gRPC server. + type: string + type: object + type: + default: Kubernetes + description: Type configures which secret store to be used. Only the + configuration block for this store will be used and others will + be ignored if provided. Default is Kubernetes. + enum: + - Kubernetes + - Vault + - Plugin + type: string + vault: + description: 'Vault configures a Vault secret store. Deprecated: This + API is scheduled to be removed in a future release. Vault should + be used as a plugin going forward. See https://github.com/crossplane-contrib/ess-plugin-vault + for more information.' + properties: + auth: + description: Auth configures an authentication method for Vault. + properties: + method: + description: Method configures which auth method will be used. + type: string + token: + description: Token configures Token Auth for Vault. + properties: + env: + description: Env is a reference to an environment variable + that contains credentials that must be used to connect + to the provider. + properties: + name: + description: Name is the name of an environment variable. + type: string + required: + - name + type: object + fs: + description: Fs is a reference to a filesystem location + that contains credentials that must be used to connect + to the provider. + properties: + path: + description: Path is a filesystem path. + type: string + required: + - path + type: object + secretRef: + description: A SecretRef is a reference to a secret key + that contains the credentials that must be used to connect + to the provider. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + source: + description: Source of the credentials. 
+ enum: + - None + - Secret + - Environment + - Filesystem + type: string + required: + - source + type: object + required: + - method + type: object + caBundle: + description: CABundle configures CA bundle for Vault Server. + properties: + env: + description: Env is a reference to an environment variable + that contains credentials that must be used to connect to + the provider. + properties: + name: + description: Name is the name of an environment variable. + type: string + required: + - name + type: object + fs: + description: Fs is a reference to a filesystem location that + contains credentials that must be used to connect to the + provider. + properties: + path: + description: Path is a filesystem path. + type: string + required: + - path + type: object + secretRef: + description: A SecretRef is a reference to a secret key that + contains the credentials that must be used to connect to + the provider. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + source: + description: Source of the credentials. + enum: + - None + - Secret + - Environment + - Filesystem + type: string + required: + - source + type: object + mountPath: + description: MountPath is the mount path of the KV secrets engine. + type: string + server: + description: Server is the url of the Vault server, e.g. "https://vault.acme.org" + type: string + version: + default: v2 + description: Version of the KV Secrets engine of Vault. https://www.vaultproject.io/docs/secrets/kv + type: string + required: + - auth + - mountPath + - server + type: object + required: + - defaultScope + type: object + status: + description: A StoreConfigStatus represents the status of a StoreConfig. + properties: + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. 
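# Example (editor's sketch, not generated content): a minimal StoreConfig for
# the default Kubernetes secret store. Per the schema above, omitting the
# kubernetes block falls back to in-cluster config; note that StoreConfigs are
# only honored when the EnableAlphaExternalSecretStores feature flag (defined
# earlier in this diff) is enabled.
apiVersion: confluent.crossplane.io/v1alpha1
kind: StoreConfig
metadata:
  name: in-cluster
spec:
  type: Kubernetes                # the schema default
  defaultScope: crossplane-system # typically the Crossplane installation namespace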
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/kafka.crossplane.io_acls.yaml b/package/crds/kafka.crossplane.io_acls.yaml new file mode 100644 index 0000000..fa4d0f2 --- /dev/null +++ b/package/crds/kafka.crossplane.io_acls.yaml @@ -0,0 +1,476 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.3 + creationTimestamp: null + name: acls.kafka.crossplane.io +spec: + group: kafka.crossplane.io + names: + categories: + - crossplane + - managed + - confluent + kind: ACL + listKind: ACLList + plural: acls + singular: acl + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ACL is the Schema for the ACLs API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ACLSpec defines the desired state of ACL + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicy field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + credentials: + description: The Cluster API Credentials. + items: + properties: + keySecretRef: + description: The Cluster API Key for your Confluent Cloud + cluster. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + secretSecretRef: + description: The Cluster API Secret for your Confluent Cloud + cluster. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + required: + - keySecretRef + - secretSecretRef + type: object + type: array + host: + description: The host for the ACL. + type: string + kafkaCluster: + items: + properties: + id: + description: The Kafka cluster ID (e.g., `lkc-12345`). + type: string + required: + - id + type: object + type: array + operation: + description: The operation type for the ACL. + type: string + patternType: + description: The pattern type for the ACL. + type: string + permission: + description: The permission for the ACL. + type: string + principal: + description: The principal for the ACL. + type: string + resourceName: + description: The resource name for the ACL. + type: string + resourceType: + description: The type of the resource. + type: string + restEndpoint: + description: The REST endpoint of the Kafka cluster (e.g., `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + type: string + type: object + managementPolicy: + default: FullControl + description: 'THIS IS AN ALPHA FIELD. Do not use it in production. + It is not honored unless the relevant Crossplane feature flag is + enabled, and may be changed or removed without notice. ManagementPolicy + specifies the level of control Crossplane has over the managed external + resource. This field is planned to replace the DeletionPolicy field + in a future release. Currently, both could be set independently + and non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - FullControl + - ObserveOnly + - OrphanOnDelete + type: string + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + providerRef: + description: 'ProviderReference specifies the provider that will be + used to create, observe, update, and delete this managed resource. + Deprecated: Please use ProviderConfigReference, i.e. `providerConfigRef`' + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 
'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: host is a required parameter + rule: self.managementPolicy == 'ObserveOnly' || has(self.forProvider.host) + - message: operation is a required parameter + rule: self.managementPolicy == 'ObserveOnly' || has(self.forProvider.operation) + - message: patternType is a required parameter + rule: self.managementPolicy == 'ObserveOnly' || has(self.forProvider.patternType) + - message: permission is a required parameter + rule: self.managementPolicy == 'ObserveOnly' || has(self.forProvider.permission) + - message: principal is a required parameter + rule: self.managementPolicy == 'ObserveOnly' || has(self.forProvider.principal) + - message: resourceName is a required parameter + rule: self.managementPolicy == 'ObserveOnly' || has(self.forProvider.resourceName) + - message: resourceType is a required parameter + rule: self.managementPolicy == 'ObserveOnly' || has(self.forProvider.resourceType) + status: + description: ACLStatus defines the observed state of ACL. + properties: + atProvider: + properties: + credentials: + description: The Cluster API Credentials. + items: + properties: + keySecretRef: + description: The Cluster API Key for your Confluent Cloud + cluster. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + secretSecretRef: + description: The Cluster API Secret for your Confluent Cloud + cluster. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - keySecretRef + - secretSecretRef + type: object + type: array + host: + description: The host for the ACL. + type: string + id: + type: string + kafkaCluster: + items: + properties: + id: + description: The Kafka cluster ID (e.g., `lkc-12345`). + type: string + type: object + type: array + operation: + description: The operation type for the ACL. + type: string + patternType: + description: The pattern type for the ACL. + type: string + permission: + description: The permission for the ACL. + type: string + principal: + description: The principal for the ACL. + type: string + resourceName: + description: The resource name for the ACL. + type: string + resourceType: + description: The type of the resource. + type: string + restEndpoint: + description: The REST endpoint of the Kafka cluster (e.g., `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? 
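# Example (editor's sketch, not generated content): an ACL manifest covering
# the fields the CEL validations above mark as required. Cluster ID, endpoint,
# principal, and secret references are hypothetical placeholders.
apiVersion: kafka.crossplane.io/v1alpha1
kind: ACL
metadata:
  name: example-acl
spec:
  forProvider:
    resourceType: TOPIC
    resourceName: orders
    patternType: LITERAL
    principal: User:sa-abc123
    host: "*"
    operation: READ
    permission: ALLOW
    kafkaCluster:
      - id: lkc-12345
    restEndpoint: https://pkc-00000.us-central1.gcp.confluent.cloud:443
    credentials:
      - keySecretRef:
          namespace: crossplane-system
          name: cluster-api-key   # hypothetical Secret holding the Cluster API key
          key: key
        secretSecretRef:
          namespace: crossplane-system
          name: cluster-api-key
          key: secret
  providerConfigRef:
    name: default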
+ type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/kafka.crossplane.io_clusterconfigs.yaml b/package/crds/kafka.crossplane.io_clusterconfigs.yaml new file mode 100644 index 0000000..becfee8 --- /dev/null +++ b/package/crds/kafka.crossplane.io_clusterconfigs.yaml @@ -0,0 +1,434 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.3 + creationTimestamp: null + name: clusterconfigs.kafka.crossplane.io +spec: + group: kafka.crossplane.io + names: + categories: + - crossplane + - managed + - confluent + kind: ClusterConfig + listKind: ClusterConfigList + plural: clusterconfigs + singular: clusterconfig + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ClusterConfig is the Schema for the ClusterConfigs API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ClusterConfigSpec defines the desired state of ClusterConfig + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicy field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + config: + additionalProperties: + type: string + description: The custom cluster settings to set (e.g., `"num.partitions" + = "8"`). + type: object + credentials: + description: The Cluster API Credentials. + items: + properties: + keySecretRef: + description: The Cluster API Key for your Confluent Cloud + cluster. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + secretSecretRef: + description: The Cluster API Secret for your Confluent Cloud + cluster. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - keySecretRef + - secretSecretRef + type: object + type: array + kafkaCluster: + items: + properties: + id: + description: The Kafka cluster ID (e.g., `lkc-12345`). + type: string + required: + - id + type: object + type: array + restEndpoint: + description: The REST endpoint of the Kafka cluster (e.g., `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + type: string + type: object + managementPolicy: + default: FullControl + description: 'THIS IS AN ALPHA FIELD. Do not use it in production. + It is not honored unless the relevant Crossplane feature flag is + enabled, and may be changed or removed without notice. ManagementPolicy + specifies the level of control Crossplane has over the managed external + resource. This field is planned to replace the DeletionPolicy field + in a future release. Currently, both could be set independently + and non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - FullControl + - ObserveOnly + - OrphanOnDelete + type: string + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + providerRef: + description: 'ProviderReference specifies the provider that will be + used to create, observe, update, and delete this managed resource. + Deprecated: Please use ProviderConfigReference, i.e. `providerConfigRef`' + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: config is a required parameter + rule: self.managementPolicy == 'ObserveOnly' || has(self.forProvider.config) + status: + description: ClusterConfigStatus defines the observed state of ClusterConfig. + properties: + atProvider: + properties: + config: + additionalProperties: + type: string + description: The custom cluster settings to set (e.g., `"num.partitions" + = "8"`). + type: object + credentials: + description: The Cluster API Credentials. + items: + properties: + keySecretRef: + description: The Cluster API Key for your Confluent Cloud + cluster. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + secretSecretRef: + description: The Cluster API Secret for your Confluent Cloud + cluster. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - keySecretRef + - secretSecretRef + type: object + type: array + id: + type: string + kafkaCluster: + items: + properties: + id: + description: The Kafka cluster ID (e.g., `lkc-12345`). + type: string + type: object + type: array + restEndpoint: + description: The REST endpoint of the Kafka cluster (e.g., `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/kafka.crossplane.io_clusters.yaml b/package/crds/kafka.crossplane.io_clusters.yaml new file mode 100644 index 0000000..fae2154 --- /dev/null +++ b/package/crds/kafka.crossplane.io_clusters.yaml @@ -0,0 +1,452 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.3 + creationTimestamp: null + name: clusters.kafka.crossplane.io +spec: + group: kafka.crossplane.io + names: + categories: + - crossplane + - managed + - confluent + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the Clusters API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ClusterSpec defines the desired state of Cluster + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicy field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + availability: + description: The availability zone configuration of the Kafka + cluster. + type: string + basic: + items: + type: object + type: array + cloud: + description: The cloud service provider that runs the Kafka cluster. + type: string + dedicated: + items: + properties: + cku: + description: The number of Confluent Kafka Units (CKUs) + for Dedicated cluster types. MULTI_ZONE dedicated clusters + must have at least two CKUs. + type: number + encryptionKey: + description: The ID of the encryption key that is used to + encrypt the data in the Kafka cluster. 
+ type: string + required: + - cku + type: object + type: array + displayName: + description: The name of the Kafka cluster. + type: string + environment: + description: Environment objects represent an isolated namespace + for your Confluent resources for organizational purposes. + items: + properties: + id: + description: The unique identifier for the environment. + type: string + required: + - id + type: object + type: array + network: + description: Network represents a network (VPC) in Confluent Cloud. + All Networks exist within Confluent-managed cloud provider accounts. + items: + properties: + id: + description: The unique identifier for the network. + type: string + required: + - id + type: object + type: array + region: + description: The cloud service provider region where the Kafka + cluster is running. + type: string + standard: + items: + type: object + type: array + type: object + managementPolicy: + default: FullControl + description: 'THIS IS AN ALPHA FIELD. Do not use it in production. + It is not honored unless the relevant Crossplane feature flag is + enabled, and may be changed or removed without notice. ManagementPolicy + specifies the level of control Crossplane has over the managed external + resource. This field is planned to replace the DeletionPolicy field + in a future release. Currently, both could be set independently + and non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - FullControl + - ObserveOnly + - OrphanOnDelete + type: string + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + providerRef: + description: 'ProviderReference specifies the provider that will be + used to create, observe, update, and delete this managed resource. + Deprecated: Please use ProviderConfigReference, i.e. `providerConfigRef`' + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: availability is a required parameter + rule: self.managementPolicy == 'ObserveOnly' || has(self.forProvider.availability) + - message: cloud is a required parameter + rule: self.managementPolicy == 'ObserveOnly' || has(self.forProvider.cloud) + - message: displayName is a required parameter + rule: self.managementPolicy == 'ObserveOnly' || has(self.forProvider.displayName) + - message: environment is a required parameter + rule: self.managementPolicy == 'ObserveOnly' || has(self.forProvider.environment) + - message: region is a required parameter + rule: self.managementPolicy == 'ObserveOnly' || has(self.forProvider.region) + status: + description: ClusterStatus defines the observed state of Cluster. + properties: + atProvider: + properties: + apiVersion: + description: API Version defines the schema version of this representation + of a Kafka cluster. + type: string + availability: + description: The availability zone configuration of the Kafka + cluster. + type: string + basic: + items: + type: object + type: array + bootstrapEndpoint: + description: The bootstrap endpoint used by Kafka clients to connect + to the Kafka cluster. + type: string + cloud: + description: The cloud service provider that runs the Kafka cluster. + type: string + dedicated: + items: + properties: + cku: + description: The number of Confluent Kafka Units (CKUs) + for Dedicated cluster types. MULTI_ZONE dedicated clusters + must have at least two CKUs. + type: number + encryptionKey: + description: The ID of the encryption key that is used to + encrypt the data in the Kafka cluster. + type: string + type: object + type: array + displayName: + description: The name of the Kafka cluster. + type: string + environment: + description: Environment objects represent an isolated namespace + for your Confluent resources for organizational purposes. + items: + properties: + id: + description: The unique identifier for the environment. + type: string + type: object + type: array + id: + type: string + kind: + description: Kind defines the object Kafka cluster represents. + type: string + network: + description: Network represents a network (VPC) in Confluent Cloud. + All Networks exist within Confluent-managed cloud provider accounts. + items: + properties: + id: + description: The unique identifier for the network. + type: string + type: object + type: array + rbacCrn: + description: The Confluent Resource Name of the Kafka cluster + suitable for confluent_role_binding's crn_pattern. + type: string + region: + description: The cloud service provider region where the Kafka + cluster is running. + type: string + restEndpoint: + description: The REST endpoint of the Kafka cluster. + type: string + standard: + items: + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/role.crossplane.io_bindings.yaml b/package/crds/role.crossplane.io_bindings.yaml new file mode 100644 index 0000000..f76d879 --- /dev/null +++ b/package/crds/role.crossplane.io_bindings.yaml @@ -0,0 +1,332 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.3 + creationTimestamp: null + name: bindings.role.crossplane.io +spec: + group: role.crossplane.io + names: + categories: + - crossplane + - managed + - confluent + kind: Binding + listKind: BindingList + plural: bindings + singular: binding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Binding is the Schema for the Bindings API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BindingSpec defines the desired state of Binding + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicy field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + crnPattern: + description: A CRN that specifies the scope and resource patterns + necessary for the role to bind. + type: string + principal: + description: The principal User to bind the role to. + type: string + roleName: + description: The name of the role to bind to the principal. + type: string + type: object + managementPolicy: + default: FullControl + description: 'THIS IS AN ALPHA FIELD. Do not use it in production. 
+ It is not honored unless the relevant Crossplane feature flag is + enabled, and may be changed or removed without notice. ManagementPolicy + specifies the level of control Crossplane has over the managed external + resource. This field is planned to replace the DeletionPolicy field + in a future release. Currently, both could be set independently + and non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - FullControl + - ObserveOnly + - OrphanOnDelete + type: string + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + providerRef: + description: 'ProviderReference specifies the provider that will be + used to create, observe, update, and delete this managed resource. + Deprecated: Please use ProviderConfigReference, i.e. `providerConfigRef`' + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: crnPattern is a required parameter + rule: self.managementPolicy == 'ObserveOnly' || has(self.forProvider.crnPattern) + - message: principal is a required parameter + rule: self.managementPolicy == 'ObserveOnly' || has(self.forProvider.principal) + - message: roleName is a required parameter + rule: self.managementPolicy == 'ObserveOnly' || has(self.forProvider.roleName) + status: + description: BindingStatus defines the observed state of Binding. + properties: + atProvider: + properties: + crnPattern: + description: A CRN that specifies the scope and resource patterns + necessary for the role to bind. + type: string + id: + type: string + principal: + description: The principal User to bind the role to. + type: string + roleName: + description: The name of the role to bind to the principal. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/service.crossplane.io_accounts.yaml b/package/crds/service.crossplane.io_accounts.yaml new file mode 100644 index 0000000..717b9cd --- /dev/null +++ b/package/crds/service.crossplane.io_accounts.yaml @@ -0,0 +1,327 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.3 + creationTimestamp: null + name: accounts.service.crossplane.io +spec: + group: service.crossplane.io + names: + categories: + - crossplane + - managed + - confluent + kind: Account + listKind: AccountList + plural: accounts + singular: account + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Account is the Schema for the Accounts API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AccountSpec defines the desired state of Account + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicy field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. 
See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: A free-form description of the Service Account. + type: string + displayName: + description: A human-readable name for the Service Account. + type: string + type: object + managementPolicy: + default: FullControl + description: 'THIS IS AN ALPHA FIELD. Do not use it in production. + It is not honored unless the relevant Crossplane feature flag is + enabled, and may be changed or removed without notice. ManagementPolicy + specifies the level of control Crossplane has over the managed external + resource. This field is planned to replace the DeletionPolicy field + in a future release. Currently, both could be set independently + and non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - FullControl + - ObserveOnly + - OrphanOnDelete + type: string + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + providerRef: + description: 'ProviderReference specifies the provider that will be + used to create, observe, update, and delete this managed resource. + Deprecated: Please use ProviderConfigReference, i.e. `providerConfigRef`' + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: displayName is a required parameter + rule: self.managementPolicy == 'ObserveOnly' || has(self.forProvider.displayName) + status: + description: AccountStatus defines the observed state of Account. 
+            properties:
+              atProvider:
+                properties:
+                  apiVersion:
+                    description: API Version defines the schema version of this representation
+                      of a Service Account.
+                    type: string
+                  description:
+                    description: A free-form description of the Service Account.
+                    type: string
+                  displayName:
+                    description: A human-readable name for the Service Account.
+                    type: string
+                  id:
+                    type: string
+                  kind:
+                    description: Kind defines the object Service Account represents.
+                    type: string
+                type: object
+              conditions:
+                description: Conditions of the resource.
+                items:
+                  description: A Condition that may apply to a resource.
+                  properties:
+                    lastTransitionTime:
+                      description: LastTransitionTime is the last time this condition
+                        transitioned from one status to another.
+                      format: date-time
+                      type: string
+                    message:
+                      description: A Message containing details about this condition's
+                        last transition from one status to another, if any.
+                      type: string
+                    reason:
+                      description: A Reason for this condition's last transition from
+                        one status to another.
+                      type: string
+                    status:
+                      description: Status of this condition; is it currently True,
+                        False, or Unknown?
+                      type: string
+                    type:
+                      description: Type of this condition. At most one of each condition
+                        type may apply to a resource at any point in time.
+                      type: string
+                  required:
+                  - lastTransitionTime
+                  - reason
+                  - status
+                  - type
+                  type: object
+                type: array
+            type: object
+        required:
+        - spec
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
diff --git a/package/crossplane.yaml b/package/crossplane.yaml
new file mode 100644
index 0000000..a5fe680
--- /dev/null
+++ b/package/crossplane.yaml
@@ -0,0 +1,4 @@
+apiVersion: meta.pkg.crossplane.io/v1alpha1
+kind: Provider
+metadata:
+  name: provider-confluent
diff --git a/scripts/version_diff.py b/scripts/version_diff.py
new file mode 100644
index 0000000..b373830
--- /dev/null
+++ b/scripts/version_diff.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python3
+
+import json
+import sys
+
+# usage: version_diff.py <generated-resource-list> <base-provider-schema> <bumped-provider-schema>
+# example usage: version_diff.py config/generated.lst .work/schema.json.3.38.0 config/schema.json
+if __name__ == "__main__":
+    base_path = sys.argv[2]
+    bumped_path = sys.argv[3]
+    print(f'Reporting schema changes between "{base_path}" as base version and "{bumped_path}" as bumped version')
+    with open(sys.argv[1]) as f:  # generated resource names, stored as a JSON array
+        resources = json.load(f)
+    with open(base_path) as f:
+        base = json.load(f)
+    with open(bumped_path) as f:
+        bump = json.load(f)
+
+    provider_name = None
+    for k in base["provider_schemas"]:
+        # the first key is the provider name
+        provider_name = k
+        break
+    if provider_name is None:
+        print(f"Cannot extract the provider name from the base schema: {base_path}")
+        sys.exit(-1)
+    base_schemas = base["provider_schemas"][provider_name]["resource_schemas"]
+    bumped_schemas = bump["provider_schemas"][provider_name]["resource_schemas"]
+
+    for name in resources:
+        try:
+            if base_schemas[name]["version"] != bumped_schemas[name]["version"]:
+                print(f'{name}:{base_schemas[name]["version"]}-{bumped_schemas[name]["version"]}')
+        except KeyError as ke:
+            print(f'{name} is not found in schema: {ke}')
+            continue
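
For reference, here are minimal example manifests for the CRDs added above.
They are sketches under stated assumptions, not provider documentation.

First, a ClusterConfig. The group of this first CRD is not visible in this
hunk, so the apiVersion below assumes it shares the kafka.crossplane.io/v1alpha1
group of the Cluster CRD, and the Secret references are placeholders. The
config, kafkaCluster id, and restEndpoint values are the examples quoted in the
schema's own field descriptions; config is required by the CEL rule unless
managementPolicy is ObserveOnly.

apiVersion: kafka.crossplane.io/v1alpha1   # assumed group, see note above
kind: ClusterConfig
metadata:
  name: example-clusterconfig
spec:
  forProvider:
    config:                                # required unless managementPolicy: ObserveOnly
      "num.partitions": "8"
    kafkaCluster:
      - id: lkc-12345
    restEndpoint: https://pkc-00000.us-central1.gcp.confluent.cloud:443
    credentials:
      - keySecretRef:                      # placeholder Secret holding the Cluster API Key
          name: cluster-api-credentials
          namespace: crossplane-system
          key: key
        secretSecretRef:                   # placeholder Secret holding the Cluster API Secret
          name: cluster-api-credentials
          namespace: crossplane-system
          key: secret
  providerConfigRef:
    name: default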
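
A Cluster needs the five parameters enforced by its CEL rules (availability,
cloud, displayName, environment, region) unless managementPolicy is
ObserveOnly. The schema types these as plain strings without enums, so the
concrete values below (SINGLE_ZONE, GCP, us-central1) are assumptions based on
Confluent Cloud conventions; only MULTI_ZONE is actually mentioned in the
schema, in the cku description. The empty basic block is assumed to select the
cluster type, mirroring the upstream Terraform provider's
basic/standard/dedicated blocks.

apiVersion: kafka.crossplane.io/v1alpha1
kind: Cluster
metadata:
  name: example-cluster
spec:
  forProvider:
    displayName: example-cluster
    availability: SINGLE_ZONE          # assumed value; the schema does not enumerate options
    cloud: GCP                         # assumed value
    region: us-central1                # assumed value
    environment:
      - id: env-abc123                 # placeholder environment ID
    basic:
      - {}                             # assumed to select the Basic cluster type
  providerConfigRef:
    name: default
  writeConnectionSecretToRef:          # both name and namespace are required here
    name: example-cluster-conn
    namespace: crossplane-system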
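
A role Binding grants a principal a role over some resource pattern; all three
forProvider fields are required by the CEL rules. Every value below is a
placeholder, though the Cluster status's rbacCrn field is, per its description,
suitable as input for the CRN pattern:

apiVersion: role.crossplane.io/v1alpha1
kind: Binding
metadata:
  name: example-binding
spec:
  forProvider:
    principal: User:sa-abc123          # assumed "User:<service account id>" form
    roleName: CloudClusterAdmin        # assumed Confluent RBAC role name
    crnPattern: crn://confluent.cloud/organization=0000/environment=env-abc123   # placeholder CRN
  providerConfigRef:
    name: default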
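
A service Account requires only displayName (again relaxed under ObserveOnly):

apiVersion: service.crossplane.io/v1alpha1
kind: Account
metadata:
  name: example-service-account
spec:
  forProvider:
    displayName: example-service-account
    description: Managed by Crossplane   # optional free-form description
  providerConfigRef:
    name: default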
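
Finally, scripts/version_diff.py: per its example comment it is run as
`version_diff.py config/generated.lst .work/schema.json.3.38.0 config/schema.json`,
i.e. the JSON list of generated resource names first, then the base and bumped
provider schema dumps (the `provider_schemas` structure matches the output of
`terraform providers schema -json`). It prints one `name:<base>-<bumped>` line
for each resource whose schema version changed between the two files, and notes
any resource missing from either schema.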