From 3f02274231acbf5482f4b9224bec1a2141b08aa7 Mon Sep 17 00:00:00 2001
From: Blake Devcich <89158881+bdevcich-hpe@users.noreply.github.com>
Date: Tue, 24 Jan 2023 12:05:08 -0600
Subject: [PATCH 01/24] Change DM Manager to use port 22 for ssh/mpirun +
 update nnf-mfu image (#81)

The DM manager was configured to use port 2222. However, the DM worker
nodes running sshd are only listening on port 22. To simplify things, the
DM manager has gone back to port 22.

Additionally, the worker image was updated to track the latest version of
nnf-mfu.

Tested in a kind VM. The manager can reach the worker via mpirun now that
it targets port 22:

```
$ kubectl -n nnf-dm-system exec nnf-dm-controller-manager-587495bc7-qrxhg -c manager -- mpirun --allow-run-as-root --host 10-244-3-3.dm.nnf-dm-system hostname
Warning: Permanently added '10-244-3-3.dm.nnf-dm-system,10.244.3.3' (ECDSA) to the list of known hosts.
nnf-dm-worker-8w7cs
```

Signed-off-by: Blake Devcich
---
 Dockerfile                  | 2 --
 config/manager/manager.yaml | 2 +-
 2 files changed, 1 insertion(+), 3 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index a9f8e199..faadd443 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -62,9 +62,7 @@ RUN apt install -y bash
 
 # The following lines are from the mpiFileUtils (nnf-mfu) Dockerfile;
 # do not change them unless you know what it is you are doing
-ARG port=2222
 RUN sed -i "s/[ #]\(.*StrictHostKeyChecking \).*/ \1no/g" /etc/ssh/ssh_config \
-    && sed -i "s/[ #]\(.*Port \).*/ \1$port/g" /etc/ssh/ssh_config \
     && echo " UserKnownHostsFile /dev/null" >> /etc/ssh/ssh_config
 
 # Copy the executable and execute
diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml
index 8f1832e5..3ef9121a 100644
--- a/config/manager/manager.yaml
+++ b/config/manager/manager.yaml
@@ -78,7 +78,7 @@ spec:
       shareProcessNamespace: true
       containers:
         - name: worker
-          image: ghcr.io/nearnodeflash/nnf-mfu:82aa3c4b6655433dacbe3294ad359161c62219c6
+          image: ghcr.io/nearnodeflash/nnf-mfu:latest
          command:
            - /usr/sbin/sshd
          args:

From fa5386bef2d5aa7668fe396d7ac7b4a5930e7905 Mon Sep 17 00:00:00 2001
From: Dean Roehrich
Date: Thu, 26 Jan 2023 15:56:58 -0600
Subject: [PATCH 02/24] Allow builds on all branches. (#82)

Loosen the branch filter for pushes.

Print some event context, for future debugging.

Remove an unused variable from verify_tag.

Rename some jobs to give them unique names, to help with debugging.
Signed-off-by: Dean Roehrich --- .github/workflows/main.yml | 8 ++++++-- .github/workflows/nnf_dm_lib.yml | 2 +- .github/workflows/rpm_build.yml | 2 +- .github/workflows/verify_tag.yml | 10 ++++++---- 4 files changed, 14 insertions(+), 8 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index f2e79507..90c2c21e 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -3,8 +3,7 @@ name: Docker build and push on: push: branches: - - 'master' - - 'releases/v*' + - '*' tags: - 'v*' pull_request: @@ -27,6 +26,11 @@ jobs: runs-on: ubuntu-latest steps: + - name: "Build context" + run: | + echo "ref is ${{ github.ref }}" + echo "ref_type is ${{ github.ref_type }}" + - name: "Checkout repository" id: checkout_repo uses: actions/checkout@v3 diff --git a/.github/workflows/nnf_dm_lib.yml b/.github/workflows/nnf_dm_lib.yml index c5b6bb6a..7061f645 100644 --- a/.github/workflows/nnf_dm_lib.yml +++ b/.github/workflows/nnf_dm_lib.yml @@ -2,7 +2,7 @@ name: NNF-DM Library Build on: push jobs: - build: + lib_build: runs-on: ubuntu-latest container: ghcr.io/nearnodeflash/grpc-cxx:latest steps: diff --git a/.github/workflows/rpm_build.yml b/.github/workflows/rpm_build.yml index 9d78b108..c99a7c41 100644 --- a/.github/workflows/rpm_build.yml +++ b/.github/workflows/rpm_build.yml @@ -2,7 +2,7 @@ name: RPM Build on: push jobs: - build: + rpm_build: runs-on: ubuntu-latest container: image: centos:8 diff --git a/.github/workflows/verify_tag.yml b/.github/workflows/verify_tag.yml index d4b6fe97..2a75d910 100644 --- a/.github/workflows/verify_tag.yml +++ b/.github/workflows/verify_tag.yml @@ -7,13 +7,15 @@ on: tags: - "v*" -env: - IMAGE_NAME: ${{ github.repository }} - jobs: - build: + verify_tag: runs-on: ubuntu-latest steps: + - name: "Verify context" + run: | + echo "ref is ${{ github.ref }}" + echo "ref_type is ${{ github.ref_type }}" + - uses: actions/checkout@v3 # actions/checkout@v3 breaks annotated tags by converting them into # lightweight tags, so we need to force fetch the tag again From d5df11f4b1eaeabaf1857fc37d5dbde75439f4f6 Mon Sep 17 00:00:00 2001 From: Nate Thornton Date: Mon, 6 Feb 2023 09:47:18 -0600 Subject: [PATCH 03/24] =?UTF-8?q?New=20Version=20endpoint=20with=20support?= =?UTF-8?q?=20for=20the=20data-mover=20'version'=20and=20a=20=E2=80=A6=20(?= =?UTF-8?q?#83)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * New Version endpoint with support for the data-mover 'version' and a list of supported 'API versions' * Add support for including the git commit hash when building the daemon Signed-off-by: Nate Thornton --- Makefile | 5 +- daemons/compute/_client-c/datamovement.pb-c.c | 924 ++++++++++++++++-- daemons/compute/_client-c/datamovement.pb-c.h | 357 ++++++- daemons/compute/api/datamovement.proto | 13 + .../compute/client-go/api/datamovement.pb.go | 545 ++++++----- .../client-go/api/datamovement_grpc.pb.go | 43 +- daemons/compute/client-go/main.go | 45 +- daemons/compute/client-py/datamovement_pb2.py | 85 +- .../client-py/datamovement_pb2_grpc.py | 36 + daemons/compute/lib-cpp/client.cc | 40 + daemons/compute/lib-cpp/client.h | 15 + .../compute/lib-cpp/example/CMakeLists.txt | 2 +- daemons/compute/lib-cpp/example/client.cc | 29 +- daemons/compute/server/main.go | 3 + .../compute/server/servers/server_default.go | 9 + daemons/compute/server/servers/server_sim.go | 9 + daemons/compute/server/version/version.go | 46 + .../protobuf/types/known/emptypb/empty.pb.go | 168 ++++ vendor/modules.txt | 5 +- 19 
files changed, 2005 insertions(+), 374 deletions(-) create mode 100644 daemons/compute/server/version/version.go create mode 100644 vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go diff --git a/Makefile b/Makefile index 547dd0e0..134b5e88 100644 --- a/Makefile +++ b/Makefile @@ -119,8 +119,11 @@ container-unit-test: .version ## Run tests inside a container image $(DOCKER) run --rm -t --name $@-nnf-dm $(IMAGE_TAG_BASE)-$@:$(VERSION) ##@ Build + +build-daemon: COMMIT_HASH = $(shell git rev-parse --short HEAD) +build-daemon: PACKAGE = github.com/NearNodeFlash/nnf-dm/daemons/compute/server/version build-daemon: manifests generate fmt vet ## Build standalone nnf-datamovement daemon - GOOS=linux GOARCH=amd64 go build -o bin/nnf-dm daemons/compute/server/main.go + GOOS=linux GOARCH=amd64 go build -ldflags="-X '$(PACKAGE).commitHash=$(COMMIT_HASH)'" -o bin/nnf-dm daemons/compute/server/main.go build: generate fmt vet ## Build manager binary. go build -o bin/manager main.go diff --git a/daemons/compute/_client-c/datamovement.pb-c.c b/daemons/compute/_client-c/datamovement.pb-c.c index 721b573e..0832d253 100644 --- a/daemons/compute/_client-c/datamovement.pb-c.c +++ b/daemons/compute/_client-c/datamovement.pb-c.c @@ -7,6 +7,141 @@ #endif #include "datamovement.pb-c.h" +void datamovement__data_movement_version_response__init + (Datamovement__DataMovementVersionResponse *message) +{ + static const Datamovement__DataMovementVersionResponse init_value = DATAMOVEMENT__DATA_MOVEMENT_VERSION_RESPONSE__INIT; + *message = init_value; +} +size_t datamovement__data_movement_version_response__get_packed_size + (const Datamovement__DataMovementVersionResponse *message) +{ + assert(message->base.descriptor == &datamovement__data_movement_version_response__descriptor); + return protobuf_c_message_get_packed_size ((const ProtobufCMessage*)(message)); +} +size_t datamovement__data_movement_version_response__pack + (const Datamovement__DataMovementVersionResponse *message, + uint8_t *out) +{ + assert(message->base.descriptor == &datamovement__data_movement_version_response__descriptor); + return protobuf_c_message_pack ((const ProtobufCMessage*)message, out); +} +size_t datamovement__data_movement_version_response__pack_to_buffer + (const Datamovement__DataMovementVersionResponse *message, + ProtobufCBuffer *buffer) +{ + assert(message->base.descriptor == &datamovement__data_movement_version_response__descriptor); + return protobuf_c_message_pack_to_buffer ((const ProtobufCMessage*)message, buffer); +} +Datamovement__DataMovementVersionResponse * + datamovement__data_movement_version_response__unpack + (ProtobufCAllocator *allocator, + size_t len, + const uint8_t *data) +{ + return (Datamovement__DataMovementVersionResponse *) + protobuf_c_message_unpack (&datamovement__data_movement_version_response__descriptor, + allocator, len, data); +} +void datamovement__data_movement_version_response__free_unpacked + (Datamovement__DataMovementVersionResponse *message, + ProtobufCAllocator *allocator) +{ + if(!message) + return; + assert(message->base.descriptor == &datamovement__data_movement_version_response__descriptor); + protobuf_c_message_free_unpacked ((ProtobufCMessage*)message, allocator); +} +void datamovement__data_movement_workflow__init + (Datamovement__DataMovementWorkflow *message) +{ + static const Datamovement__DataMovementWorkflow init_value = DATAMOVEMENT__DATA_MOVEMENT_WORKFLOW__INIT; + *message = init_value; +} +size_t datamovement__data_movement_workflow__get_packed_size + (const 
Datamovement__DataMovementWorkflow *message) +{ + assert(message->base.descriptor == &datamovement__data_movement_workflow__descriptor); + return protobuf_c_message_get_packed_size ((const ProtobufCMessage*)(message)); +} +size_t datamovement__data_movement_workflow__pack + (const Datamovement__DataMovementWorkflow *message, + uint8_t *out) +{ + assert(message->base.descriptor == &datamovement__data_movement_workflow__descriptor); + return protobuf_c_message_pack ((const ProtobufCMessage*)message, out); +} +size_t datamovement__data_movement_workflow__pack_to_buffer + (const Datamovement__DataMovementWorkflow *message, + ProtobufCBuffer *buffer) +{ + assert(message->base.descriptor == &datamovement__data_movement_workflow__descriptor); + return protobuf_c_message_pack_to_buffer ((const ProtobufCMessage*)message, buffer); +} +Datamovement__DataMovementWorkflow * + datamovement__data_movement_workflow__unpack + (ProtobufCAllocator *allocator, + size_t len, + const uint8_t *data) +{ + return (Datamovement__DataMovementWorkflow *) + protobuf_c_message_unpack (&datamovement__data_movement_workflow__descriptor, + allocator, len, data); +} +void datamovement__data_movement_workflow__free_unpacked + (Datamovement__DataMovementWorkflow *message, + ProtobufCAllocator *allocator) +{ + if(!message) + return; + assert(message->base.descriptor == &datamovement__data_movement_workflow__descriptor); + protobuf_c_message_free_unpacked ((ProtobufCMessage*)message, allocator); +} +void datamovement__data_movement_command_status__init + (Datamovement__DataMovementCommandStatus *message) +{ + static const Datamovement__DataMovementCommandStatus init_value = DATAMOVEMENT__DATA_MOVEMENT_COMMAND_STATUS__INIT; + *message = init_value; +} +size_t datamovement__data_movement_command_status__get_packed_size + (const Datamovement__DataMovementCommandStatus *message) +{ + assert(message->base.descriptor == &datamovement__data_movement_command_status__descriptor); + return protobuf_c_message_get_packed_size ((const ProtobufCMessage*)(message)); +} +size_t datamovement__data_movement_command_status__pack + (const Datamovement__DataMovementCommandStatus *message, + uint8_t *out) +{ + assert(message->base.descriptor == &datamovement__data_movement_command_status__descriptor); + return protobuf_c_message_pack ((const ProtobufCMessage*)message, out); +} +size_t datamovement__data_movement_command_status__pack_to_buffer + (const Datamovement__DataMovementCommandStatus *message, + ProtobufCBuffer *buffer) +{ + assert(message->base.descriptor == &datamovement__data_movement_command_status__descriptor); + return protobuf_c_message_pack_to_buffer ((const ProtobufCMessage*)message, buffer); +} +Datamovement__DataMovementCommandStatus * + datamovement__data_movement_command_status__unpack + (ProtobufCAllocator *allocator, + size_t len, + const uint8_t *data) +{ + return (Datamovement__DataMovementCommandStatus *) + protobuf_c_message_unpack (&datamovement__data_movement_command_status__descriptor, + allocator, len, data); +} +void datamovement__data_movement_command_status__free_unpacked + (Datamovement__DataMovementCommandStatus *message, + ProtobufCAllocator *allocator) +{ + if(!message) + return; + assert(message->base.descriptor == &datamovement__data_movement_command_status__descriptor); + protobuf_c_message_free_unpacked ((ProtobufCMessage*)message, allocator); +} void datamovement__data_movement_create_request__init (Datamovement__DataMovementCreateRequest *message) { @@ -277,35 +412,395 @@ void 
datamovement__data_movement_delete_response__free_unpacked assert(message->base.descriptor == &datamovement__data_movement_delete_response__descriptor); protobuf_c_message_free_unpacked ((ProtobufCMessage*)message, allocator); } -static const ProtobufCFieldDescriptor datamovement__data_movement_create_request__field_descriptors[5] = +void datamovement__data_movement_list_request__init + (Datamovement__DataMovementListRequest *message) +{ + static const Datamovement__DataMovementListRequest init_value = DATAMOVEMENT__DATA_MOVEMENT_LIST_REQUEST__INIT; + *message = init_value; +} +size_t datamovement__data_movement_list_request__get_packed_size + (const Datamovement__DataMovementListRequest *message) +{ + assert(message->base.descriptor == &datamovement__data_movement_list_request__descriptor); + return protobuf_c_message_get_packed_size ((const ProtobufCMessage*)(message)); +} +size_t datamovement__data_movement_list_request__pack + (const Datamovement__DataMovementListRequest *message, + uint8_t *out) +{ + assert(message->base.descriptor == &datamovement__data_movement_list_request__descriptor); + return protobuf_c_message_pack ((const ProtobufCMessage*)message, out); +} +size_t datamovement__data_movement_list_request__pack_to_buffer + (const Datamovement__DataMovementListRequest *message, + ProtobufCBuffer *buffer) +{ + assert(message->base.descriptor == &datamovement__data_movement_list_request__descriptor); + return protobuf_c_message_pack_to_buffer ((const ProtobufCMessage*)message, buffer); +} +Datamovement__DataMovementListRequest * + datamovement__data_movement_list_request__unpack + (ProtobufCAllocator *allocator, + size_t len, + const uint8_t *data) +{ + return (Datamovement__DataMovementListRequest *) + protobuf_c_message_unpack (&datamovement__data_movement_list_request__descriptor, + allocator, len, data); +} +void datamovement__data_movement_list_request__free_unpacked + (Datamovement__DataMovementListRequest *message, + ProtobufCAllocator *allocator) +{ + if(!message) + return; + assert(message->base.descriptor == &datamovement__data_movement_list_request__descriptor); + protobuf_c_message_free_unpacked ((ProtobufCMessage*)message, allocator); +} +void datamovement__data_movement_list_response__init + (Datamovement__DataMovementListResponse *message) { + static const Datamovement__DataMovementListResponse init_value = DATAMOVEMENT__DATA_MOVEMENT_LIST_RESPONSE__INIT; + *message = init_value; +} +size_t datamovement__data_movement_list_response__get_packed_size + (const Datamovement__DataMovementListResponse *message) +{ + assert(message->base.descriptor == &datamovement__data_movement_list_response__descriptor); + return protobuf_c_message_get_packed_size ((const ProtobufCMessage*)(message)); +} +size_t datamovement__data_movement_list_response__pack + (const Datamovement__DataMovementListResponse *message, + uint8_t *out) +{ + assert(message->base.descriptor == &datamovement__data_movement_list_response__descriptor); + return protobuf_c_message_pack ((const ProtobufCMessage*)message, out); +} +size_t datamovement__data_movement_list_response__pack_to_buffer + (const Datamovement__DataMovementListResponse *message, + ProtobufCBuffer *buffer) +{ + assert(message->base.descriptor == &datamovement__data_movement_list_response__descriptor); + return protobuf_c_message_pack_to_buffer ((const ProtobufCMessage*)message, buffer); +} +Datamovement__DataMovementListResponse * + datamovement__data_movement_list_response__unpack + (ProtobufCAllocator *allocator, + size_t len, + const 
uint8_t *data) +{ + return (Datamovement__DataMovementListResponse *) + protobuf_c_message_unpack (&datamovement__data_movement_list_response__descriptor, + allocator, len, data); +} +void datamovement__data_movement_list_response__free_unpacked + (Datamovement__DataMovementListResponse *message, + ProtobufCAllocator *allocator) +{ + if(!message) + return; + assert(message->base.descriptor == &datamovement__data_movement_list_response__descriptor); + protobuf_c_message_free_unpacked ((ProtobufCMessage*)message, allocator); +} +void datamovement__data_movement_cancel_request__init + (Datamovement__DataMovementCancelRequest *message) +{ + static const Datamovement__DataMovementCancelRequest init_value = DATAMOVEMENT__DATA_MOVEMENT_CANCEL_REQUEST__INIT; + *message = init_value; +} +size_t datamovement__data_movement_cancel_request__get_packed_size + (const Datamovement__DataMovementCancelRequest *message) +{ + assert(message->base.descriptor == &datamovement__data_movement_cancel_request__descriptor); + return protobuf_c_message_get_packed_size ((const ProtobufCMessage*)(message)); +} +size_t datamovement__data_movement_cancel_request__pack + (const Datamovement__DataMovementCancelRequest *message, + uint8_t *out) +{ + assert(message->base.descriptor == &datamovement__data_movement_cancel_request__descriptor); + return protobuf_c_message_pack ((const ProtobufCMessage*)message, out); +} +size_t datamovement__data_movement_cancel_request__pack_to_buffer + (const Datamovement__DataMovementCancelRequest *message, + ProtobufCBuffer *buffer) +{ + assert(message->base.descriptor == &datamovement__data_movement_cancel_request__descriptor); + return protobuf_c_message_pack_to_buffer ((const ProtobufCMessage*)message, buffer); +} +Datamovement__DataMovementCancelRequest * + datamovement__data_movement_cancel_request__unpack + (ProtobufCAllocator *allocator, + size_t len, + const uint8_t *data) +{ + return (Datamovement__DataMovementCancelRequest *) + protobuf_c_message_unpack (&datamovement__data_movement_cancel_request__descriptor, + allocator, len, data); +} +void datamovement__data_movement_cancel_request__free_unpacked + (Datamovement__DataMovementCancelRequest *message, + ProtobufCAllocator *allocator) +{ + if(!message) + return; + assert(message->base.descriptor == &datamovement__data_movement_cancel_request__descriptor); + protobuf_c_message_free_unpacked ((ProtobufCMessage*)message, allocator); +} +void datamovement__data_movement_cancel_response__init + (Datamovement__DataMovementCancelResponse *message) +{ + static const Datamovement__DataMovementCancelResponse init_value = DATAMOVEMENT__DATA_MOVEMENT_CANCEL_RESPONSE__INIT; + *message = init_value; +} +size_t datamovement__data_movement_cancel_response__get_packed_size + (const Datamovement__DataMovementCancelResponse *message) +{ + assert(message->base.descriptor == &datamovement__data_movement_cancel_response__descriptor); + return protobuf_c_message_get_packed_size ((const ProtobufCMessage*)(message)); +} +size_t datamovement__data_movement_cancel_response__pack + (const Datamovement__DataMovementCancelResponse *message, + uint8_t *out) +{ + assert(message->base.descriptor == &datamovement__data_movement_cancel_response__descriptor); + return protobuf_c_message_pack ((const ProtobufCMessage*)message, out); +} +size_t datamovement__data_movement_cancel_response__pack_to_buffer + (const Datamovement__DataMovementCancelResponse *message, + ProtobufCBuffer *buffer) +{ + assert(message->base.descriptor == 
&datamovement__data_movement_cancel_response__descriptor); + return protobuf_c_message_pack_to_buffer ((const ProtobufCMessage*)message, buffer); +} +Datamovement__DataMovementCancelResponse * + datamovement__data_movement_cancel_response__unpack + (ProtobufCAllocator *allocator, + size_t len, + const uint8_t *data) +{ + return (Datamovement__DataMovementCancelResponse *) + protobuf_c_message_unpack (&datamovement__data_movement_cancel_response__descriptor, + allocator, len, data); +} +void datamovement__data_movement_cancel_response__free_unpacked + (Datamovement__DataMovementCancelResponse *message, + ProtobufCAllocator *allocator) +{ + if(!message) + return; + assert(message->base.descriptor == &datamovement__data_movement_cancel_response__descriptor); + protobuf_c_message_free_unpacked ((ProtobufCMessage*)message, allocator); +} +static const ProtobufCFieldDescriptor datamovement__data_movement_version_response__field_descriptors[2] = +{ + { + "version", + 1, + PROTOBUF_C_LABEL_NONE, + PROTOBUF_C_TYPE_STRING, + 0, /* quantifier_offset */ + offsetof(Datamovement__DataMovementVersionResponse, version), + NULL, + &protobuf_c_empty_string, + 0, /* flags */ + 0,NULL,NULL /* reserved1,reserved2, etc */ + }, + { + "apiVersions", + 2, + PROTOBUF_C_LABEL_REPEATED, + PROTOBUF_C_TYPE_INT32, + offsetof(Datamovement__DataMovementVersionResponse, n_apiversions), + offsetof(Datamovement__DataMovementVersionResponse, apiversions), + NULL, + NULL, + 0 | PROTOBUF_C_FIELD_FLAG_PACKED, /* flags */ + 0,NULL,NULL /* reserved1,reserved2, etc */ + }, +}; +static const unsigned datamovement__data_movement_version_response__field_indices_by_name[] = { + 1, /* field[1] = apiVersions */ + 0, /* field[0] = version */ +}; +static const ProtobufCIntRange datamovement__data_movement_version_response__number_ranges[1 + 1] = +{ + { 1, 0 }, + { 0, 2 } +}; +const ProtobufCMessageDescriptor datamovement__data_movement_version_response__descriptor = +{ + PROTOBUF_C__MESSAGE_DESCRIPTOR_MAGIC, + "datamovement.DataMovementVersionResponse", + "DataMovementVersionResponse", + "Datamovement__DataMovementVersionResponse", + "datamovement", + sizeof(Datamovement__DataMovementVersionResponse), + 2, + datamovement__data_movement_version_response__field_descriptors, + datamovement__data_movement_version_response__field_indices_by_name, + 1, datamovement__data_movement_version_response__number_ranges, + (ProtobufCMessageInit) datamovement__data_movement_version_response__init, + NULL,NULL,NULL /* reserved[123] */ +}; +static const ProtobufCFieldDescriptor datamovement__data_movement_workflow__field_descriptors[2] = +{ + { + "name", + 1, + PROTOBUF_C_LABEL_NONE, + PROTOBUF_C_TYPE_STRING, + 0, /* quantifier_offset */ + offsetof(Datamovement__DataMovementWorkflow, name), + NULL, + &protobuf_c_empty_string, + 0, /* flags */ + 0,NULL,NULL /* reserved1,reserved2, etc */ + }, + { + "namespace", + 2, + PROTOBUF_C_LABEL_NONE, + PROTOBUF_C_TYPE_STRING, + 0, /* quantifier_offset */ + offsetof(Datamovement__DataMovementWorkflow, namespace_), + NULL, + &protobuf_c_empty_string, + 0, /* flags */ + 0,NULL,NULL /* reserved1,reserved2, etc */ + }, +}; +static const unsigned datamovement__data_movement_workflow__field_indices_by_name[] = { + 0, /* field[0] = name */ + 1, /* field[1] = namespace */ +}; +static const ProtobufCIntRange datamovement__data_movement_workflow__number_ranges[1 + 1] = +{ + { 1, 0 }, + { 0, 2 } +}; +const ProtobufCMessageDescriptor datamovement__data_movement_workflow__descriptor = +{ + PROTOBUF_C__MESSAGE_DESCRIPTOR_MAGIC, + 
"datamovement.DataMovementWorkflow", + "DataMovementWorkflow", + "Datamovement__DataMovementWorkflow", + "datamovement", + sizeof(Datamovement__DataMovementWorkflow), + 2, + datamovement__data_movement_workflow__field_descriptors, + datamovement__data_movement_workflow__field_indices_by_name, + 1, datamovement__data_movement_workflow__number_ranges, + (ProtobufCMessageInit) datamovement__data_movement_workflow__init, + NULL,NULL,NULL /* reserved[123] */ +}; +static const ProtobufCFieldDescriptor datamovement__data_movement_command_status__field_descriptors[5] = +{ + { + "command", + 1, + PROTOBUF_C_LABEL_NONE, + PROTOBUF_C_TYPE_STRING, + 0, /* quantifier_offset */ + offsetof(Datamovement__DataMovementCommandStatus, command), + NULL, + &protobuf_c_empty_string, + 0, /* flags */ + 0,NULL,NULL /* reserved1,reserved2, etc */ + }, + { + "progress", + 2, + PROTOBUF_C_LABEL_NONE, + PROTOBUF_C_TYPE_INT32, + 0, /* quantifier_offset */ + offsetof(Datamovement__DataMovementCommandStatus, progress), + NULL, + NULL, + 0, /* flags */ + 0,NULL,NULL /* reserved1,reserved2, etc */ + }, + { + "elapsedTime", + 3, + PROTOBUF_C_LABEL_NONE, + PROTOBUF_C_TYPE_STRING, + 0, /* quantifier_offset */ + offsetof(Datamovement__DataMovementCommandStatus, elapsedtime), + NULL, + &protobuf_c_empty_string, + 0, /* flags */ + 0,NULL,NULL /* reserved1,reserved2, etc */ + }, { - "workflow", - 3, + "lastMessage", + 4, PROTOBUF_C_LABEL_NONE, PROTOBUF_C_TYPE_STRING, 0, /* quantifier_offset */ - offsetof(Datamovement__DataMovementCreateRequest, workflow), + offsetof(Datamovement__DataMovementCommandStatus, lastmessage), NULL, &protobuf_c_empty_string, 0, /* flags */ 0,NULL,NULL /* reserved1,reserved2, etc */ }, { - "namespace", - 4, + "lastMessageTime", + 5, PROTOBUF_C_LABEL_NONE, PROTOBUF_C_TYPE_STRING, 0, /* quantifier_offset */ - offsetof(Datamovement__DataMovementCreateRequest, namespace_), + offsetof(Datamovement__DataMovementCommandStatus, lastmessagetime), NULL, &protobuf_c_empty_string, 0, /* flags */ 0,NULL,NULL /* reserved1,reserved2, etc */ }, +}; +static const unsigned datamovement__data_movement_command_status__field_indices_by_name[] = { + 0, /* field[0] = command */ + 2, /* field[2] = elapsedTime */ + 3, /* field[3] = lastMessage */ + 4, /* field[4] = lastMessageTime */ + 1, /* field[1] = progress */ +}; +static const ProtobufCIntRange datamovement__data_movement_command_status__number_ranges[1 + 1] = +{ + { 1, 0 }, + { 0, 5 } +}; +const ProtobufCMessageDescriptor datamovement__data_movement_command_status__descriptor = +{ + PROTOBUF_C__MESSAGE_DESCRIPTOR_MAGIC, + "datamovement.DataMovementCommandStatus", + "DataMovementCommandStatus", + "Datamovement__DataMovementCommandStatus", + "datamovement", + sizeof(Datamovement__DataMovementCommandStatus), + 5, + datamovement__data_movement_command_status__field_descriptors, + datamovement__data_movement_command_status__field_indices_by_name, + 1, datamovement__data_movement_command_status__number_ranges, + (ProtobufCMessageInit) datamovement__data_movement_command_status__init, + NULL,NULL,NULL /* reserved[123] */ +}; +static const ProtobufCFieldDescriptor datamovement__data_movement_create_request__field_descriptors[4] = +{ + { + "workflow", + 1, + PROTOBUF_C_LABEL_NONE, + PROTOBUF_C_TYPE_MESSAGE, + 0, /* quantifier_offset */ + offsetof(Datamovement__DataMovementCreateRequest, workflow), + &datamovement__data_movement_workflow__descriptor, + NULL, + 0, /* flags */ + 0,NULL,NULL /* reserved1,reserved2, etc */ + }, { "source", - 5, + 2, PROTOBUF_C_LABEL_NONE, 
PROTOBUF_C_TYPE_STRING, 0, /* quantifier_offset */ @@ -317,7 +812,7 @@ static const ProtobufCFieldDescriptor datamovement__data_movement_create_request }, { "destination", - 6, + 3, PROTOBUF_C_LABEL_NONE, PROTOBUF_C_TYPE_STRING, 0, /* quantifier_offset */ @@ -329,7 +824,7 @@ static const ProtobufCFieldDescriptor datamovement__data_movement_create_request }, { "dryrun", - 7, + 4, PROTOBUF_C_LABEL_NONE, PROTOBUF_C_TYPE_BOOL, 0, /* quantifier_offset */ @@ -341,16 +836,15 @@ static const ProtobufCFieldDescriptor datamovement__data_movement_create_request }, }; static const unsigned datamovement__data_movement_create_request__field_indices_by_name[] = { - 3, /* field[3] = destination */ - 4, /* field[4] = dryrun */ - 1, /* field[1] = namespace */ - 2, /* field[2] = source */ + 2, /* field[2] = destination */ + 3, /* field[3] = dryrun */ + 1, /* field[1] = source */ 0, /* field[0] = workflow */ }; static const ProtobufCIntRange datamovement__data_movement_create_request__number_ranges[1 + 1] = { - { 3, 0 }, - { 0, 5 } + { 1, 0 }, + { 0, 4 } }; const ProtobufCMessageDescriptor datamovement__data_movement_create_request__descriptor = { @@ -360,25 +854,27 @@ const ProtobufCMessageDescriptor datamovement__data_movement_create_request__des "Datamovement__DataMovementCreateRequest", "datamovement", sizeof(Datamovement__DataMovementCreateRequest), - 5, + 4, datamovement__data_movement_create_request__field_descriptors, datamovement__data_movement_create_request__field_indices_by_name, 1, datamovement__data_movement_create_request__number_ranges, (ProtobufCMessageInit) datamovement__data_movement_create_request__init, NULL,NULL,NULL /* reserved[123] */ }; -static const ProtobufCEnumValue datamovement__data_movement_create_response__status__enum_values_by_number[2] = +static const ProtobufCEnumValue datamovement__data_movement_create_response__status__enum_values_by_number[3] = { - { "CREATED", "DATAMOVEMENT__DATA_MOVEMENT_CREATE_RESPONSE__STATUS__CREATED", 0 }, + { "SUCCESS", "DATAMOVEMENT__DATA_MOVEMENT_CREATE_RESPONSE__STATUS__SUCCESS", 0 }, { "FAILED", "DATAMOVEMENT__DATA_MOVEMENT_CREATE_RESPONSE__STATUS__FAILED", 1 }, + { "INVALID", "DATAMOVEMENT__DATA_MOVEMENT_CREATE_RESPONSE__STATUS__INVALID", 2 }, }; static const ProtobufCIntRange datamovement__data_movement_create_response__status__value_ranges[] = { -{0, 0},{0, 2} +{0, 0},{0, 3} }; -static const ProtobufCEnumValueIndex datamovement__data_movement_create_response__status__enum_values_by_name[2] = +static const ProtobufCEnumValueIndex datamovement__data_movement_create_response__status__enum_values_by_name[3] = { - { "CREATED", 0 }, { "FAILED", 1 }, + { "INVALID", 2 }, + { "SUCCESS", 0 }, }; const ProtobufCEnumDescriptor datamovement__data_movement_create_response__status__descriptor = { @@ -387,9 +883,9 @@ const ProtobufCEnumDescriptor datamovement__data_movement_create_response__statu "Status", "Datamovement__DataMovementCreateResponse__Status", "datamovement", - 2, + 3, datamovement__data_movement_create_response__status__enum_values_by_number, - 2, + 3, datamovement__data_movement_create_response__status__enum_values_by_name, 1, datamovement__data_movement_create_response__status__value_ranges, @@ -459,12 +955,24 @@ const ProtobufCMessageDescriptor datamovement__data_movement_create_response__de (ProtobufCMessageInit) datamovement__data_movement_create_response__init, NULL,NULL,NULL /* reserved[123] */ }; -static const ProtobufCFieldDescriptor datamovement__data_movement_status_request__field_descriptors[2] = +static const 
ProtobufCFieldDescriptor datamovement__data_movement_status_request__field_descriptors[3] = { { - "uid", + "workflow", 1, PROTOBUF_C_LABEL_NONE, + PROTOBUF_C_TYPE_MESSAGE, + 0, /* quantifier_offset */ + offsetof(Datamovement__DataMovementStatusRequest, workflow), + &datamovement__data_movement_workflow__descriptor, + NULL, + 0, /* flags */ + 0,NULL,NULL /* reserved1,reserved2, etc */ + }, + { + "uid", + 2, + PROTOBUF_C_LABEL_NONE, PROTOBUF_C_TYPE_STRING, 0, /* quantifier_offset */ offsetof(Datamovement__DataMovementStatusRequest, uid), @@ -475,7 +983,7 @@ static const ProtobufCFieldDescriptor datamovement__data_movement_status_request }, { "maxWaitTime", - 2, + 3, PROTOBUF_C_LABEL_NONE, PROTOBUF_C_TYPE_INT64, 0, /* quantifier_offset */ @@ -487,13 +995,14 @@ static const ProtobufCFieldDescriptor datamovement__data_movement_status_request }, }; static const unsigned datamovement__data_movement_status_request__field_indices_by_name[] = { - 1, /* field[1] = maxWaitTime */ - 0, /* field[0] = uid */ + 2, /* field[2] = maxWaitTime */ + 1, /* field[1] = uid */ + 0, /* field[0] = workflow */ }; static const ProtobufCIntRange datamovement__data_movement_status_request__number_ranges[1 + 1] = { { 1, 0 }, - { 0, 2 } + { 0, 3 } }; const ProtobufCMessageDescriptor datamovement__data_movement_status_request__descriptor = { @@ -503,31 +1012,33 @@ const ProtobufCMessageDescriptor datamovement__data_movement_status_request__des "Datamovement__DataMovementStatusRequest", "datamovement", sizeof(Datamovement__DataMovementStatusRequest), - 2, + 3, datamovement__data_movement_status_request__field_descriptors, datamovement__data_movement_status_request__field_indices_by_name, 1, datamovement__data_movement_status_request__number_ranges, (ProtobufCMessageInit) datamovement__data_movement_status_request__init, NULL,NULL,NULL /* reserved[123] */ }; -static const ProtobufCEnumValue datamovement__data_movement_status_response__state__enum_values_by_number[5] = +static const ProtobufCEnumValue datamovement__data_movement_status_response__state__enum_values_by_number[6] = { { "PENDING", "DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATE__PENDING", 0 }, { "STARTING", "DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATE__STARTING", 1 }, { "RUNNING", "DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATE__RUNNING", 2 }, { "COMPLETED", "DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATE__COMPLETED", 3 }, - { "UNKNOWN_STATE", "DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATE__UNKNOWN_STATE", 4 }, + { "CANCELLING", "DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATE__CANCELLING", 4 }, + { "UNKNOWN_STATE", "DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATE__UNKNOWN_STATE", 5 }, }; static const ProtobufCIntRange datamovement__data_movement_status_response__state__value_ranges[] = { -{0, 0},{0, 5} +{0, 0},{0, 6} }; -static const ProtobufCEnumValueIndex datamovement__data_movement_status_response__state__enum_values_by_name[5] = +static const ProtobufCEnumValueIndex datamovement__data_movement_status_response__state__enum_values_by_name[6] = { + { "CANCELLING", 4 }, { "COMPLETED", 3 }, { "PENDING", 0 }, { "RUNNING", 2 }, { "STARTING", 1 }, - { "UNKNOWN_STATE", 4 }, + { "UNKNOWN_STATE", 5 }, }; const ProtobufCEnumDescriptor datamovement__data_movement_status_response__state__descriptor = { @@ -536,32 +1047,34 @@ const ProtobufCEnumDescriptor datamovement__data_movement_status_response__state "State", "Datamovement__DataMovementStatusResponse__State", "datamovement", - 5, + 6, 
datamovement__data_movement_status_response__state__enum_values_by_number, - 5, + 6, datamovement__data_movement_status_response__state__enum_values_by_name, 1, datamovement__data_movement_status_response__state__value_ranges, NULL,NULL,NULL,NULL /* reserved[1234] */ }; -static const ProtobufCEnumValue datamovement__data_movement_status_response__status__enum_values_by_number[5] = +static const ProtobufCEnumValue datamovement__data_movement_status_response__status__enum_values_by_number[6] = { { "INVALID", "DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATUS__INVALID", 0 }, { "NOT_FOUND", "DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATUS__NOT_FOUND", 1 }, { "SUCCESS", "DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATUS__SUCCESS", 2 }, { "FAILED", "DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATUS__FAILED", 3 }, - { "UNKNOWN_STATUS", "DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATUS__UNKNOWN_STATUS", 4 }, + { "CANCELLED", "DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATUS__CANCELLED", 4 }, + { "UNKNOWN_STATUS", "DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATUS__UNKNOWN_STATUS", 5 }, }; static const ProtobufCIntRange datamovement__data_movement_status_response__status__value_ranges[] = { -{0, 0},{0, 5} +{0, 0},{0, 6} }; -static const ProtobufCEnumValueIndex datamovement__data_movement_status_response__status__enum_values_by_name[5] = +static const ProtobufCEnumValueIndex datamovement__data_movement_status_response__status__enum_values_by_name[6] = { + { "CANCELLED", 4 }, { "FAILED", 3 }, { "INVALID", 0 }, { "NOT_FOUND", 1 }, { "SUCCESS", 2 }, - { "UNKNOWN_STATUS", 4 }, + { "UNKNOWN_STATUS", 5 }, }; const ProtobufCEnumDescriptor datamovement__data_movement_status_response__status__descriptor = { @@ -570,15 +1083,15 @@ const ProtobufCEnumDescriptor datamovement__data_movement_status_response__statu "Status", "Datamovement__DataMovementStatusResponse__Status", "datamovement", - 5, + 6, datamovement__data_movement_status_response__status__enum_values_by_number, - 5, + 6, datamovement__data_movement_status_response__status__enum_values_by_name, 1, datamovement__data_movement_status_response__status__value_ranges, NULL,NULL,NULL,NULL /* reserved[1234] */ }; -static const ProtobufCFieldDescriptor datamovement__data_movement_status_response__field_descriptors[3] = +static const ProtobufCFieldDescriptor datamovement__data_movement_status_response__field_descriptors[4] = { { "state", @@ -616,8 +1129,21 @@ static const ProtobufCFieldDescriptor datamovement__data_movement_status_respons 0, /* flags */ 0,NULL,NULL /* reserved1,reserved2, etc */ }, + { + "commandStatus", + 4, + PROTOBUF_C_LABEL_NONE, + PROTOBUF_C_TYPE_MESSAGE, + 0, /* quantifier_offset */ + offsetof(Datamovement__DataMovementStatusResponse, commandstatus), + &datamovement__data_movement_command_status__descriptor, + NULL, + 0, /* flags */ + 0,NULL,NULL /* reserved1,reserved2, etc */ + }, }; static const unsigned datamovement__data_movement_status_response__field_indices_by_name[] = { + 3, /* field[3] = commandStatus */ 2, /* field[2] = message */ 0, /* field[0] = state */ 1, /* field[1] = status */ @@ -625,7 +1151,7 @@ static const unsigned datamovement__data_movement_status_response__field_indices static const ProtobufCIntRange datamovement__data_movement_status_response__number_ranges[1 + 1] = { { 1, 0 }, - { 0, 3 } + { 0, 4 } }; const ProtobufCMessageDescriptor datamovement__data_movement_status_response__descriptor = { @@ -635,19 +1161,31 @@ const ProtobufCMessageDescriptor datamovement__data_movement_status_response__de 
"Datamovement__DataMovementStatusResponse", "datamovement", sizeof(Datamovement__DataMovementStatusResponse), - 3, + 4, datamovement__data_movement_status_response__field_descriptors, datamovement__data_movement_status_response__field_indices_by_name, 1, datamovement__data_movement_status_response__number_ranges, (ProtobufCMessageInit) datamovement__data_movement_status_response__init, NULL,NULL,NULL /* reserved[123] */ }; -static const ProtobufCFieldDescriptor datamovement__data_movement_delete_request__field_descriptors[1] = +static const ProtobufCFieldDescriptor datamovement__data_movement_delete_request__field_descriptors[2] = { { - "uid", + "workflow", 1, PROTOBUF_C_LABEL_NONE, + PROTOBUF_C_TYPE_MESSAGE, + 0, /* quantifier_offset */ + offsetof(Datamovement__DataMovementDeleteRequest, workflow), + &datamovement__data_movement_workflow__descriptor, + NULL, + 0, /* flags */ + 0,NULL,NULL /* reserved1,reserved2, etc */ + }, + { + "uid", + 2, + PROTOBUF_C_LABEL_NONE, PROTOBUF_C_TYPE_STRING, 0, /* quantifier_offset */ offsetof(Datamovement__DataMovementDeleteRequest, uid), @@ -658,12 +1196,13 @@ static const ProtobufCFieldDescriptor datamovement__data_movement_delete_request }, }; static const unsigned datamovement__data_movement_delete_request__field_indices_by_name[] = { - 0, /* field[0] = uid */ + 1, /* field[1] = uid */ + 0, /* field[0] = workflow */ }; static const ProtobufCIntRange datamovement__data_movement_delete_request__number_ranges[1 + 1] = { { 1, 0 }, - { 0, 1 } + { 0, 2 } }; const ProtobufCMessageDescriptor datamovement__data_movement_delete_request__descriptor = { @@ -673,7 +1212,7 @@ const ProtobufCMessageDescriptor datamovement__data_movement_delete_request__des "Datamovement__DataMovementDeleteRequest", "datamovement", sizeof(Datamovement__DataMovementDeleteRequest), - 1, + 2, datamovement__data_movement_delete_request__field_descriptors, datamovement__data_movement_delete_request__field_indices_by_name, 1, datamovement__data_movement_delete_request__number_ranges, @@ -684,7 +1223,7 @@ static const ProtobufCEnumValue datamovement__data_movement_delete_response__sta { { "INVALID", "DATAMOVEMENT__DATA_MOVEMENT_DELETE_RESPONSE__STATUS__INVALID", 0 }, { "NOT_FOUND", "DATAMOVEMENT__DATA_MOVEMENT_DELETE_RESPONSE__STATUS__NOT_FOUND", 1 }, - { "DELETED", "DATAMOVEMENT__DATA_MOVEMENT_DELETE_RESPONSE__STATUS__DELETED", 2 }, + { "SUCCESS", "DATAMOVEMENT__DATA_MOVEMENT_DELETE_RESPONSE__STATUS__SUCCESS", 2 }, { "ACTIVE", "DATAMOVEMENT__DATA_MOVEMENT_DELETE_RESPONSE__STATUS__ACTIVE", 3 }, { "UNKNOWN", "DATAMOVEMENT__DATA_MOVEMENT_DELETE_RESPONSE__STATUS__UNKNOWN", 4 }, }; @@ -694,9 +1233,9 @@ static const ProtobufCIntRange datamovement__data_movement_delete_response__stat static const ProtobufCEnumValueIndex datamovement__data_movement_delete_response__status__enum_values_by_name[5] = { { "ACTIVE", 3 }, - { "DELETED", 2 }, { "INVALID", 0 }, { "NOT_FOUND", 1 }, + { "SUCCESS", 2 }, { "UNKNOWN", 4 }, }; const ProtobufCEnumDescriptor datamovement__data_movement_delete_response__status__descriptor = @@ -714,7 +1253,7 @@ const ProtobufCEnumDescriptor datamovement__data_movement_delete_response__statu datamovement__data_movement_delete_response__status__value_ranges, NULL,NULL,NULL,NULL /* reserved[1234] */ }; -static const ProtobufCFieldDescriptor datamovement__data_movement_delete_response__field_descriptors[1] = +static const ProtobufCFieldDescriptor datamovement__data_movement_delete_response__field_descriptors[2] = { { "status", @@ -728,14 +1267,27 @@ static const 
ProtobufCFieldDescriptor datamovement__data_movement_delete_respons 0, /* flags */ 0,NULL,NULL /* reserved1,reserved2, etc */ }, + { + "message", + 2, + PROTOBUF_C_LABEL_NONE, + PROTOBUF_C_TYPE_STRING, + 0, /* quantifier_offset */ + offsetof(Datamovement__DataMovementDeleteResponse, message), + NULL, + &protobuf_c_empty_string, + 0, /* flags */ + 0,NULL,NULL /* reserved1,reserved2, etc */ + }, }; static const unsigned datamovement__data_movement_delete_response__field_indices_by_name[] = { + 1, /* field[1] = message */ 0, /* field[0] = status */ }; static const ProtobufCIntRange datamovement__data_movement_delete_response__number_ranges[1 + 1] = { { 1, 0 }, - { 0, 1 } + { 0, 2 } }; const ProtobufCMessageDescriptor datamovement__data_movement_delete_response__descriptor = { @@ -745,23 +1297,239 @@ const ProtobufCMessageDescriptor datamovement__data_movement_delete_response__de "Datamovement__DataMovementDeleteResponse", "datamovement", sizeof(Datamovement__DataMovementDeleteResponse), - 1, + 2, datamovement__data_movement_delete_response__field_descriptors, datamovement__data_movement_delete_response__field_indices_by_name, 1, datamovement__data_movement_delete_response__number_ranges, (ProtobufCMessageInit) datamovement__data_movement_delete_response__init, NULL,NULL,NULL /* reserved[123] */ }; -static const ProtobufCMethodDescriptor datamovement__data_mover__method_descriptors[3] = +static const ProtobufCFieldDescriptor datamovement__data_movement_list_request__field_descriptors[1] = +{ + { + "workflow", + 1, + PROTOBUF_C_LABEL_NONE, + PROTOBUF_C_TYPE_MESSAGE, + 0, /* quantifier_offset */ + offsetof(Datamovement__DataMovementListRequest, workflow), + &datamovement__data_movement_workflow__descriptor, + NULL, + 0, /* flags */ + 0,NULL,NULL /* reserved1,reserved2, etc */ + }, +}; +static const unsigned datamovement__data_movement_list_request__field_indices_by_name[] = { + 0, /* field[0] = workflow */ +}; +static const ProtobufCIntRange datamovement__data_movement_list_request__number_ranges[1 + 1] = +{ + { 1, 0 }, + { 0, 1 } +}; +const ProtobufCMessageDescriptor datamovement__data_movement_list_request__descriptor = +{ + PROTOBUF_C__MESSAGE_DESCRIPTOR_MAGIC, + "datamovement.DataMovementListRequest", + "DataMovementListRequest", + "Datamovement__DataMovementListRequest", + "datamovement", + sizeof(Datamovement__DataMovementListRequest), + 1, + datamovement__data_movement_list_request__field_descriptors, + datamovement__data_movement_list_request__field_indices_by_name, + 1, datamovement__data_movement_list_request__number_ranges, + (ProtobufCMessageInit) datamovement__data_movement_list_request__init, + NULL,NULL,NULL /* reserved[123] */ +}; +static const ProtobufCFieldDescriptor datamovement__data_movement_list_response__field_descriptors[1] = +{ + { + "uids", + 1, + PROTOBUF_C_LABEL_REPEATED, + PROTOBUF_C_TYPE_STRING, + offsetof(Datamovement__DataMovementListResponse, n_uids), + offsetof(Datamovement__DataMovementListResponse, uids), + NULL, + &protobuf_c_empty_string, + 0, /* flags */ + 0,NULL,NULL /* reserved1,reserved2, etc */ + }, +}; +static const unsigned datamovement__data_movement_list_response__field_indices_by_name[] = { + 0, /* field[0] = uids */ +}; +static const ProtobufCIntRange datamovement__data_movement_list_response__number_ranges[1 + 1] = +{ + { 1, 0 }, + { 0, 1 } +}; +const ProtobufCMessageDescriptor datamovement__data_movement_list_response__descriptor = +{ + PROTOBUF_C__MESSAGE_DESCRIPTOR_MAGIC, + "datamovement.DataMovementListResponse", + "DataMovementListResponse", 
+ "Datamovement__DataMovementListResponse", + "datamovement", + sizeof(Datamovement__DataMovementListResponse), + 1, + datamovement__data_movement_list_response__field_descriptors, + datamovement__data_movement_list_response__field_indices_by_name, + 1, datamovement__data_movement_list_response__number_ranges, + (ProtobufCMessageInit) datamovement__data_movement_list_response__init, + NULL,NULL,NULL /* reserved[123] */ +}; +static const ProtobufCFieldDescriptor datamovement__data_movement_cancel_request__field_descriptors[2] = +{ + { + "workflow", + 1, + PROTOBUF_C_LABEL_NONE, + PROTOBUF_C_TYPE_MESSAGE, + 0, /* quantifier_offset */ + offsetof(Datamovement__DataMovementCancelRequest, workflow), + &datamovement__data_movement_workflow__descriptor, + NULL, + 0, /* flags */ + 0,NULL,NULL /* reserved1,reserved2, etc */ + }, + { + "uid", + 2, + PROTOBUF_C_LABEL_NONE, + PROTOBUF_C_TYPE_STRING, + 0, /* quantifier_offset */ + offsetof(Datamovement__DataMovementCancelRequest, uid), + NULL, + &protobuf_c_empty_string, + 0, /* flags */ + 0,NULL,NULL /* reserved1,reserved2, etc */ + }, +}; +static const unsigned datamovement__data_movement_cancel_request__field_indices_by_name[] = { + 1, /* field[1] = uid */ + 0, /* field[0] = workflow */ +}; +static const ProtobufCIntRange datamovement__data_movement_cancel_request__number_ranges[1 + 1] = +{ + { 1, 0 }, + { 0, 2 } +}; +const ProtobufCMessageDescriptor datamovement__data_movement_cancel_request__descriptor = +{ + PROTOBUF_C__MESSAGE_DESCRIPTOR_MAGIC, + "datamovement.DataMovementCancelRequest", + "DataMovementCancelRequest", + "Datamovement__DataMovementCancelRequest", + "datamovement", + sizeof(Datamovement__DataMovementCancelRequest), + 2, + datamovement__data_movement_cancel_request__field_descriptors, + datamovement__data_movement_cancel_request__field_indices_by_name, + 1, datamovement__data_movement_cancel_request__number_ranges, + (ProtobufCMessageInit) datamovement__data_movement_cancel_request__init, + NULL,NULL,NULL /* reserved[123] */ +}; +static const ProtobufCEnumValue datamovement__data_movement_cancel_response__status__enum_values_by_number[4] = +{ + { "INVALID", "DATAMOVEMENT__DATA_MOVEMENT_CANCEL_RESPONSE__STATUS__INVALID", 0 }, + { "NOT_FOUND", "DATAMOVEMENT__DATA_MOVEMENT_CANCEL_RESPONSE__STATUS__NOT_FOUND", 1 }, + { "SUCCESS", "DATAMOVEMENT__DATA_MOVEMENT_CANCEL_RESPONSE__STATUS__SUCCESS", 2 }, + { "FAILED", "DATAMOVEMENT__DATA_MOVEMENT_CANCEL_RESPONSE__STATUS__FAILED", 3 }, +}; +static const ProtobufCIntRange datamovement__data_movement_cancel_response__status__value_ranges[] = { +{0, 0},{0, 4} +}; +static const ProtobufCEnumValueIndex datamovement__data_movement_cancel_response__status__enum_values_by_name[4] = +{ + { "FAILED", 3 }, + { "INVALID", 0 }, + { "NOT_FOUND", 1 }, + { "SUCCESS", 2 }, +}; +const ProtobufCEnumDescriptor datamovement__data_movement_cancel_response__status__descriptor = +{ + PROTOBUF_C__ENUM_DESCRIPTOR_MAGIC, + "datamovement.DataMovementCancelResponse.Status", + "Status", + "Datamovement__DataMovementCancelResponse__Status", + "datamovement", + 4, + datamovement__data_movement_cancel_response__status__enum_values_by_number, + 4, + datamovement__data_movement_cancel_response__status__enum_values_by_name, + 1, + datamovement__data_movement_cancel_response__status__value_ranges, + NULL,NULL,NULL,NULL /* reserved[1234] */ +}; +static const ProtobufCFieldDescriptor datamovement__data_movement_cancel_response__field_descriptors[2] = +{ + { + "status", + 1, + PROTOBUF_C_LABEL_NONE, + PROTOBUF_C_TYPE_ENUM, + 0, /* 
quantifier_offset */ + offsetof(Datamovement__DataMovementCancelResponse, status), + &datamovement__data_movement_cancel_response__status__descriptor, + NULL, + 0, /* flags */ + 0,NULL,NULL /* reserved1,reserved2, etc */ + }, + { + "message", + 2, + PROTOBUF_C_LABEL_NONE, + PROTOBUF_C_TYPE_STRING, + 0, /* quantifier_offset */ + offsetof(Datamovement__DataMovementCancelResponse, message), + NULL, + &protobuf_c_empty_string, + 0, /* flags */ + 0,NULL,NULL /* reserved1,reserved2, etc */ + }, +}; +static const unsigned datamovement__data_movement_cancel_response__field_indices_by_name[] = { + 1, /* field[1] = message */ + 0, /* field[0] = status */ +}; +static const ProtobufCIntRange datamovement__data_movement_cancel_response__number_ranges[1 + 1] = +{ + { 1, 0 }, + { 0, 2 } +}; +const ProtobufCMessageDescriptor datamovement__data_movement_cancel_response__descriptor = +{ + PROTOBUF_C__MESSAGE_DESCRIPTOR_MAGIC, + "datamovement.DataMovementCancelResponse", + "DataMovementCancelResponse", + "Datamovement__DataMovementCancelResponse", + "datamovement", + sizeof(Datamovement__DataMovementCancelResponse), + 2, + datamovement__data_movement_cancel_response__field_descriptors, + datamovement__data_movement_cancel_response__field_indices_by_name, + 1, datamovement__data_movement_cancel_response__number_ranges, + (ProtobufCMessageInit) datamovement__data_movement_cancel_response__init, + NULL,NULL,NULL /* reserved[123] */ +}; +static const ProtobufCMethodDescriptor datamovement__data_mover__method_descriptors[6] = { + { "Version", &google__protobuf__empty__descriptor, &datamovement__data_movement_version_response__descriptor }, { "Create", &datamovement__data_movement_create_request__descriptor, &datamovement__data_movement_create_response__descriptor }, { "Status", &datamovement__data_movement_status_request__descriptor, &datamovement__data_movement_status_response__descriptor }, { "Delete", &datamovement__data_movement_delete_request__descriptor, &datamovement__data_movement_delete_response__descriptor }, + { "List", &datamovement__data_movement_list_request__descriptor, &datamovement__data_movement_list_response__descriptor }, + { "Cancel", &datamovement__data_movement_cancel_request__descriptor, &datamovement__data_movement_cancel_response__descriptor }, }; const unsigned datamovement__data_mover__method_indices_by_name[] = { - 0, /* Create */ - 2, /* Delete */ - 1 /* Status */ + 5, /* Cancel */ + 1, /* Create */ + 3, /* Delete */ + 4, /* List */ + 2, /* Status */ + 0 /* Version */ }; const ProtobufCServiceDescriptor datamovement__data_mover__descriptor = { @@ -770,17 +1538,25 @@ const ProtobufCServiceDescriptor datamovement__data_mover__descriptor = "DataMover", "Datamovement__DataMover", "datamovement", - 3, + 6, datamovement__data_mover__method_descriptors, datamovement__data_mover__method_indices_by_name }; +void datamovement__data_mover__version(ProtobufCService *service, + const Google__Protobuf__Empty *input, + Datamovement__DataMovementVersionResponse_Closure closure, + void *closure_data) +{ + assert(service->descriptor == &datamovement__data_mover__descriptor); + service->invoke(service, 0, (const ProtobufCMessage *) input, (ProtobufCClosure) closure, closure_data); +} void datamovement__data_mover__create(ProtobufCService *service, const Datamovement__DataMovementCreateRequest *input, Datamovement__DataMovementCreateResponse_Closure closure, void *closure_data) { assert(service->descriptor == &datamovement__data_mover__descriptor); - service->invoke(service, 0, (const ProtobufCMessage 
*) input, (ProtobufCClosure) closure, closure_data); + service->invoke(service, 1, (const ProtobufCMessage *) input, (ProtobufCClosure) closure, closure_data); } void datamovement__data_mover__status(ProtobufCService *service, const Datamovement__DataMovementStatusRequest *input, @@ -788,7 +1564,7 @@ void datamovement__data_mover__status(ProtobufCService *service, void *closure_data) { assert(service->descriptor == &datamovement__data_mover__descriptor); - service->invoke(service, 1, (const ProtobufCMessage *) input, (ProtobufCClosure) closure, closure_data); + service->invoke(service, 2, (const ProtobufCMessage *) input, (ProtobufCClosure) closure, closure_data); } void datamovement__data_mover__delete(ProtobufCService *service, const Datamovement__DataMovementDeleteRequest *input, @@ -796,7 +1572,23 @@ void datamovement__data_mover__delete(ProtobufCService *service, void *closure_data) { assert(service->descriptor == &datamovement__data_mover__descriptor); - service->invoke(service, 2, (const ProtobufCMessage *) input, (ProtobufCClosure) closure, closure_data); + service->invoke(service, 3, (const ProtobufCMessage *) input, (ProtobufCClosure) closure, closure_data); +} +void datamovement__data_mover__list(ProtobufCService *service, + const Datamovement__DataMovementListRequest *input, + Datamovement__DataMovementListResponse_Closure closure, + void *closure_data) +{ + assert(service->descriptor == &datamovement__data_mover__descriptor); + service->invoke(service, 4, (const ProtobufCMessage *) input, (ProtobufCClosure) closure, closure_data); +} +void datamovement__data_mover__cancel(ProtobufCService *service, + const Datamovement__DataMovementCancelRequest *input, + Datamovement__DataMovementCancelResponse_Closure closure, + void *closure_data) +{ + assert(service->descriptor == &datamovement__data_mover__descriptor); + service->invoke(service, 5, (const ProtobufCMessage *) input, (ProtobufCClosure) closure, closure_data); } void datamovement__data_mover__init (Datamovement__DataMover_Service *service, Datamovement__DataMover_ServiceDestroy destroy) diff --git a/daemons/compute/_client-c/datamovement.pb-c.h b/daemons/compute/_client-c/datamovement.pb-c.h index 0c5a8c0e..27c3deda 100644 --- a/daemons/compute/_client-c/datamovement.pb-c.h +++ b/daemons/compute/_client-c/datamovement.pb-c.h @@ -10,24 +10,33 @@ PROTOBUF_C__BEGIN_DECLS #if PROTOBUF_C_VERSION_NUMBER < 1003000 # error This file was generated by a newer version of protoc-c which is incompatible with your libprotobuf-c headers. Please update your headers. -#elif 1004000 < PROTOBUF_C_MIN_COMPILER_VERSION +#elif 1004001 < PROTOBUF_C_MIN_COMPILER_VERSION # error This file was generated by an older version of protoc-c which is incompatible with your libprotobuf-c headers. Please regenerate this file with a newer version of protoc-c. 
#endif +#include "google/protobuf/empty.pb-c.h" +typedef struct Datamovement__DataMovementVersionResponse Datamovement__DataMovementVersionResponse; +typedef struct Datamovement__DataMovementWorkflow Datamovement__DataMovementWorkflow; +typedef struct Datamovement__DataMovementCommandStatus Datamovement__DataMovementCommandStatus; typedef struct Datamovement__DataMovementCreateRequest Datamovement__DataMovementCreateRequest; typedef struct Datamovement__DataMovementCreateResponse Datamovement__DataMovementCreateResponse; typedef struct Datamovement__DataMovementStatusRequest Datamovement__DataMovementStatusRequest; typedef struct Datamovement__DataMovementStatusResponse Datamovement__DataMovementStatusResponse; typedef struct Datamovement__DataMovementDeleteRequest Datamovement__DataMovementDeleteRequest; typedef struct Datamovement__DataMovementDeleteResponse Datamovement__DataMovementDeleteResponse; +typedef struct Datamovement__DataMovementListRequest Datamovement__DataMovementListRequest; +typedef struct Datamovement__DataMovementListResponse Datamovement__DataMovementListResponse; +typedef struct Datamovement__DataMovementCancelRequest Datamovement__DataMovementCancelRequest; +typedef struct Datamovement__DataMovementCancelResponse Datamovement__DataMovementCancelResponse; /* --- enums --- */ typedef enum _Datamovement__DataMovementCreateResponse__Status { - DATAMOVEMENT__DATA_MOVEMENT_CREATE_RESPONSE__STATUS__CREATED = 0, - DATAMOVEMENT__DATA_MOVEMENT_CREATE_RESPONSE__STATUS__FAILED = 1 + DATAMOVEMENT__DATA_MOVEMENT_CREATE_RESPONSE__STATUS__SUCCESS = 0, + DATAMOVEMENT__DATA_MOVEMENT_CREATE_RESPONSE__STATUS__FAILED = 1, + DATAMOVEMENT__DATA_MOVEMENT_CREATE_RESPONSE__STATUS__INVALID = 2 PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(DATAMOVEMENT__DATA_MOVEMENT_CREATE_RESPONSE__STATUS) } Datamovement__DataMovementCreateResponse__Status; typedef enum _Datamovement__DataMovementStatusResponse__State { @@ -35,7 +44,8 @@ typedef enum _Datamovement__DataMovementStatusResponse__State { DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATE__STARTING = 1, DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATE__RUNNING = 2, DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATE__COMPLETED = 3, - DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATE__UNKNOWN_STATE = 4 + DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATE__CANCELLING = 4, + DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATE__UNKNOWN_STATE = 5 PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATE) } Datamovement__DataMovementStatusResponse__State; typedef enum _Datamovement__DataMovementStatusResponse__Status { @@ -43,43 +53,101 @@ typedef enum _Datamovement__DataMovementStatusResponse__Status { DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATUS__NOT_FOUND = 1, DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATUS__SUCCESS = 2, DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATUS__FAILED = 3, - DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATUS__UNKNOWN_STATUS = 4 + DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATUS__CANCELLED = 4, + DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATUS__UNKNOWN_STATUS = 5 PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATUS) } Datamovement__DataMovementStatusResponse__Status; typedef enum _Datamovement__DataMovementDeleteResponse__Status { DATAMOVEMENT__DATA_MOVEMENT_DELETE_RESPONSE__STATUS__INVALID = 0, DATAMOVEMENT__DATA_MOVEMENT_DELETE_RESPONSE__STATUS__NOT_FOUND = 1, - DATAMOVEMENT__DATA_MOVEMENT_DELETE_RESPONSE__STATUS__DELETED = 2, + 
DATAMOVEMENT__DATA_MOVEMENT_DELETE_RESPONSE__STATUS__SUCCESS = 2,
   DATAMOVEMENT__DATA_MOVEMENT_DELETE_RESPONSE__STATUS__ACTIVE = 3,
   DATAMOVEMENT__DATA_MOVEMENT_DELETE_RESPONSE__STATUS__UNKNOWN = 4
     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(DATAMOVEMENT__DATA_MOVEMENT_DELETE_RESPONSE__STATUS)
 } Datamovement__DataMovementDeleteResponse__Status;
+typedef enum _Datamovement__DataMovementCancelResponse__Status {
+  DATAMOVEMENT__DATA_MOVEMENT_CANCEL_RESPONSE__STATUS__INVALID = 0,
+  DATAMOVEMENT__DATA_MOVEMENT_CANCEL_RESPONSE__STATUS__NOT_FOUND = 1,
+  DATAMOVEMENT__DATA_MOVEMENT_CANCEL_RESPONSE__STATUS__SUCCESS = 2,
+  DATAMOVEMENT__DATA_MOVEMENT_CANCEL_RESPONSE__STATUS__FAILED = 3
+    PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(DATAMOVEMENT__DATA_MOVEMENT_CANCEL_RESPONSE__STATUS)
+} Datamovement__DataMovementCancelResponse__Status;

 /* --- messages --- */

 /*
- * The data movemement create request message containing the source and destination files or directories. The
+ * The data movement version response returns information on the running data movement daemon. This includes
+ * the build version and a list of supported API versions.
+ */
+struct Datamovement__DataMovementVersionResponse
+{
+  ProtobufCMessage base;
+  char *version;
+  size_t n_apiversions;
+  char **apiversions;
+};
+#define DATAMOVEMENT__DATA_MOVEMENT_VERSION_RESPONSE__INIT \
+ { PROTOBUF_C_MESSAGE_INIT (&datamovement__data_movement_version_response__descriptor) \
+    , (char *)protobuf_c_empty_string, 0,NULL }
+
+
+/*
+ * Data Movement workflow message contains identifying information for the workflow initiating the data movement
+ * operation. This message must be nested in all Request messages.
+ */
+struct Datamovement__DataMovementWorkflow
+{
+  ProtobufCMessage base;
+  char *name;
+  char *namespace_;
+};
+#define DATAMOVEMENT__DATA_MOVEMENT_WORKFLOW__INIT \
+ { PROTOBUF_C_MESSAGE_INIT (&datamovement__data_movement_workflow__descriptor) \
+    , (char *)protobuf_c_empty_string, (char *)protobuf_c_empty_string }
+
+
+/*
+ * Data Movement workflows contain a CommandStatus object in the Status object. This information
+ * relays the current state of the data movement progress. It includes the command, the progress
+ * percentage (e.g. 0-100%), elapsed time of the data movement (duration string, e.g. 5s), the last
+ * received message from the data movement command, and the time of that last message.
+ */
+struct Datamovement__DataMovementCommandStatus
+{
+  ProtobufCMessage base;
+  char *command;
+  int32_t progress;
+  char *elapsedtime;
+  char *lastmessage;
+  char *lastmessagetime;
+};
+#define DATAMOVEMENT__DATA_MOVEMENT_COMMAND_STATUS__INIT \
+ { PROTOBUF_C_MESSAGE_INIT (&datamovement__data_movement_command_status__descriptor) \
+    , (char *)protobuf_c_empty_string, 0, (char *)protobuf_c_empty_string, (char *)protobuf_c_empty_string, (char *)protobuf_c_empty_string }
+
+
+/*
+ * The data movement create request message containing the source and destination files or directories. The
 * NNF Data Mover will perform a copy from source to the destination.
Specify dryrun to instantiate * a new data movement request where the copy is simulated and not executed */ struct Datamovement__DataMovementCreateRequest { ProtobufCMessage base; - char *workflow; - char *namespace_; + Datamovement__DataMovementWorkflow *workflow; char *source; char *destination; protobuf_c_boolean dryrun; }; #define DATAMOVEMENT__DATA_MOVEMENT_CREATE_REQUEST__INIT \ { PROTOBUF_C_MESSAGE_INIT (&datamovement__data_movement_create_request__descriptor) \ - , (char *)protobuf_c_empty_string, (char *)protobuf_c_empty_string, (char *)protobuf_c_empty_string, (char *)protobuf_c_empty_string, 0 } + , NULL, (char *)protobuf_c_empty_string, (char *)protobuf_c_empty_string, 0 } /* * The data movement create response message contains a unique identifier amont all data movement requests for * the lifetime of the active job. The UID can be used to query for status of the request using the - * data movememnt status request message. + * data movement status request message. */ struct Datamovement__DataMovementCreateResponse { @@ -90,7 +158,7 @@ struct Datamovement__DataMovementCreateResponse }; #define DATAMOVEMENT__DATA_MOVEMENT_CREATE_RESPONSE__INIT \ { PROTOBUF_C_MESSAGE_INIT (&datamovement__data_movement_create_response__descriptor) \ - , (char *)protobuf_c_empty_string, DATAMOVEMENT__DATA_MOVEMENT_CREATE_RESPONSE__STATUS__CREATED, (char *)protobuf_c_empty_string } + , (char *)protobuf_c_empty_string, DATAMOVEMENT__DATA_MOVEMENT_CREATE_RESPONSE__STATUS__SUCCESS, (char *)protobuf_c_empty_string } /* @@ -100,12 +168,13 @@ struct Datamovement__DataMovementCreateResponse struct Datamovement__DataMovementStatusRequest { ProtobufCMessage base; + Datamovement__DataMovementWorkflow *workflow; char *uid; int64_t maxwaittime; }; #define DATAMOVEMENT__DATA_MOVEMENT_STATUS_REQUEST__INIT \ { PROTOBUF_C_MESSAGE_INIT (&datamovement__data_movement_status_request__descriptor) \ - , (char *)protobuf_c_empty_string, 0 } + , NULL, (char *)protobuf_c_empty_string, 0 } /* @@ -119,10 +188,11 @@ struct Datamovement__DataMovementStatusResponse Datamovement__DataMovementStatusResponse__State state; Datamovement__DataMovementStatusResponse__Status status; char *message; + Datamovement__DataMovementCommandStatus *commandstatus; }; #define DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__INIT \ { PROTOBUF_C_MESSAGE_INIT (&datamovement__data_movement_status_response__descriptor) \ - , DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATE__PENDING, DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATUS__INVALID, (char *)protobuf_c_empty_string } + , DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATE__PENDING, DATAMOVEMENT__DATA_MOVEMENT_STATUS_RESPONSE__STATUS__INVALID, (char *)protobuf_c_empty_string, NULL } /* @@ -132,11 +202,12 @@ struct Datamovement__DataMovementStatusResponse struct Datamovement__DataMovementDeleteRequest { ProtobufCMessage base; + Datamovement__DataMovementWorkflow *workflow; char *uid; }; #define DATAMOVEMENT__DATA_MOVEMENT_DELETE_REQUEST__INIT \ { PROTOBUF_C_MESSAGE_INIT (&datamovement__data_movement_delete_request__descriptor) \ - , (char *)protobuf_c_empty_string } + , NULL, (char *)protobuf_c_empty_string } /* @@ -146,12 +217,132 @@ struct Datamovement__DataMovementDeleteResponse { ProtobufCMessage base; Datamovement__DataMovementDeleteResponse__Status status; + char *message; }; #define DATAMOVEMENT__DATA_MOVEMENT_DELETE_RESPONSE__INIT \ { PROTOBUF_C_MESSAGE_INIT (&datamovement__data_movement_delete_response__descriptor) \ - , DATAMOVEMENT__DATA_MOVEMENT_DELETE_RESPONSE__STATUS__INVALID } + , 
DATAMOVEMENT__DATA_MOVEMENT_DELETE_RESPONSE__STATUS__INVALID, (char *)protobuf_c_empty_string }
+
+
+/*
+ * The data movement list request allows a user to get a list of the current
+ * data movement requests. The user must supply the workflow and namespace
+ * which will be used to filter the data movement requests for retrieval.
+ */
+struct Datamovement__DataMovementListRequest
+{
+  ProtobufCMessage base;
+  Datamovement__DataMovementWorkflow *workflow;
+};
+#define DATAMOVEMENT__DATA_MOVEMENT_LIST_REQUEST__INIT \
+ { PROTOBUF_C_MESSAGE_INIT (&datamovement__data_movement_list_request__descriptor) \
+    , NULL }
+
+
+/*
+ * The data movement list response returns a list of the matching data movement
+ * requests' uids
+ */
+struct Datamovement__DataMovementListResponse
+{
+  ProtobufCMessage base;
+  size_t n_uids;
+  char **uids;
+};
+#define DATAMOVEMENT__DATA_MOVEMENT_LIST_RESPONSE__INIT \
+ { PROTOBUF_C_MESSAGE_INIT (&datamovement__data_movement_list_response__descriptor) \
+    , 0,NULL }
+
+
+/*
+ * The data movement cancel request permits users to initiate a cancellation of
+ * a data movement request that is in progress.
+ */
+struct Datamovement__DataMovementCancelRequest
+{
+  ProtobufCMessage base;
+  Datamovement__DataMovementWorkflow *workflow;
+  char *uid;
+};
+#define DATAMOVEMENT__DATA_MOVEMENT_CANCEL_REQUEST__INIT \
+ { PROTOBUF_C_MESSAGE_INIT (&datamovement__data_movement_cancel_request__descriptor) \
+    , NULL, (char *)protobuf_c_empty_string }
+
+
+/*
+ * The data movement cancel response returns the status of the cancel request.
+ * The cancel process will begin upon success. This response does not indicate
+ * that the cancel process has completed, but rather that it has been
+ * initiated.
+ */
+struct Datamovement__DataMovementCancelResponse
+{
+  ProtobufCMessage base;
+  Datamovement__DataMovementCancelResponse__Status status;
+  char *message;
+};
+#define DATAMOVEMENT__DATA_MOVEMENT_CANCEL_RESPONSE__INIT \
+ { PROTOBUF_C_MESSAGE_INIT (&datamovement__data_movement_cancel_response__descriptor) \
+    , DATAMOVEMENT__DATA_MOVEMENT_CANCEL_RESPONSE__STATUS__INVALID, (char *)protobuf_c_empty_string }
+/* Datamovement__DataMovementVersionResponse methods */
+void datamovement__data_movement_version_response__init
+ (Datamovement__DataMovementVersionResponse *message);
+size_t datamovement__data_movement_version_response__get_packed_size
+ (const Datamovement__DataMovementVersionResponse *message);
+size_t datamovement__data_movement_version_response__pack
+ (const Datamovement__DataMovementVersionResponse *message,
+ uint8_t *out);
+size_t datamovement__data_movement_version_response__pack_to_buffer
+ (const Datamovement__DataMovementVersionResponse *message,
+ ProtobufCBuffer *buffer);
+Datamovement__DataMovementVersionResponse *
+ datamovement__data_movement_version_response__unpack
+ (ProtobufCAllocator *allocator,
+ size_t len,
+ const uint8_t *data);
+void datamovement__data_movement_version_response__free_unpacked
+ (Datamovement__DataMovementVersionResponse *message,
+ ProtobufCAllocator *allocator);
+/* Datamovement__DataMovementWorkflow methods */
+void datamovement__data_movement_workflow__init
+ (Datamovement__DataMovementWorkflow *message);
+size_t datamovement__data_movement_workflow__get_packed_size
+ (const Datamovement__DataMovementWorkflow *message);
+size_t datamovement__data_movement_workflow__pack
+ (const Datamovement__DataMovementWorkflow *message,
+ uint8_t *out);
+size_t datamovement__data_movement_workflow__pack_to_buffer
+ (const
Datamovement__DataMovementWorkflow *message, + ProtobufCBuffer *buffer); +Datamovement__DataMovementWorkflow * + datamovement__data_movement_workflow__unpack + (ProtobufCAllocator *allocator, + size_t len, + const uint8_t *data); +void datamovement__data_movement_workflow__free_unpacked + (Datamovement__DataMovementWorkflow *message, + ProtobufCAllocator *allocator); +/* Datamovement__DataMovementCommandStatus methods */ +void datamovement__data_movement_command_status__init + (Datamovement__DataMovementCommandStatus *message); +size_t datamovement__data_movement_command_status__get_packed_size + (const Datamovement__DataMovementCommandStatus *message); +size_t datamovement__data_movement_command_status__pack + (const Datamovement__DataMovementCommandStatus *message, + uint8_t *out); +size_t datamovement__data_movement_command_status__pack_to_buffer + (const Datamovement__DataMovementCommandStatus *message, + ProtobufCBuffer *buffer); +Datamovement__DataMovementCommandStatus * + datamovement__data_movement_command_status__unpack + (ProtobufCAllocator *allocator, + size_t len, + const uint8_t *data); +void datamovement__data_movement_command_status__free_unpacked + (Datamovement__DataMovementCommandStatus *message, + ProtobufCAllocator *allocator); /* Datamovement__DataMovementCreateRequest methods */ void datamovement__data_movement_create_request__init (Datamovement__DataMovementCreateRequest *message); @@ -266,8 +457,93 @@ Datamovement__DataMovementDeleteResponse * void datamovement__data_movement_delete_response__free_unpacked (Datamovement__DataMovementDeleteResponse *message, ProtobufCAllocator *allocator); +/* Datamovement__DataMovementListRequest methods */ +void datamovement__data_movement_list_request__init + (Datamovement__DataMovementListRequest *message); +size_t datamovement__data_movement_list_request__get_packed_size + (const Datamovement__DataMovementListRequest *message); +size_t datamovement__data_movement_list_request__pack + (const Datamovement__DataMovementListRequest *message, + uint8_t *out); +size_t datamovement__data_movement_list_request__pack_to_buffer + (const Datamovement__DataMovementListRequest *message, + ProtobufCBuffer *buffer); +Datamovement__DataMovementListRequest * + datamovement__data_movement_list_request__unpack + (ProtobufCAllocator *allocator, + size_t len, + const uint8_t *data); +void datamovement__data_movement_list_request__free_unpacked + (Datamovement__DataMovementListRequest *message, + ProtobufCAllocator *allocator); +/* Datamovement__DataMovementListResponse methods */ +void datamovement__data_movement_list_response__init + (Datamovement__DataMovementListResponse *message); +size_t datamovement__data_movement_list_response__get_packed_size + (const Datamovement__DataMovementListResponse *message); +size_t datamovement__data_movement_list_response__pack + (const Datamovement__DataMovementListResponse *message, + uint8_t *out); +size_t datamovement__data_movement_list_response__pack_to_buffer + (const Datamovement__DataMovementListResponse *message, + ProtobufCBuffer *buffer); +Datamovement__DataMovementListResponse * + datamovement__data_movement_list_response__unpack + (ProtobufCAllocator *allocator, + size_t len, + const uint8_t *data); +void datamovement__data_movement_list_response__free_unpacked + (Datamovement__DataMovementListResponse *message, + ProtobufCAllocator *allocator); +/* Datamovement__DataMovementCancelRequest methods */ +void datamovement__data_movement_cancel_request__init + (Datamovement__DataMovementCancelRequest 
*message); +size_t datamovement__data_movement_cancel_request__get_packed_size + (const Datamovement__DataMovementCancelRequest *message); +size_t datamovement__data_movement_cancel_request__pack + (const Datamovement__DataMovementCancelRequest *message, + uint8_t *out); +size_t datamovement__data_movement_cancel_request__pack_to_buffer + (const Datamovement__DataMovementCancelRequest *message, + ProtobufCBuffer *buffer); +Datamovement__DataMovementCancelRequest * + datamovement__data_movement_cancel_request__unpack + (ProtobufCAllocator *allocator, + size_t len, + const uint8_t *data); +void datamovement__data_movement_cancel_request__free_unpacked + (Datamovement__DataMovementCancelRequest *message, + ProtobufCAllocator *allocator); +/* Datamovement__DataMovementCancelResponse methods */ +void datamovement__data_movement_cancel_response__init + (Datamovement__DataMovementCancelResponse *message); +size_t datamovement__data_movement_cancel_response__get_packed_size + (const Datamovement__DataMovementCancelResponse *message); +size_t datamovement__data_movement_cancel_response__pack + (const Datamovement__DataMovementCancelResponse *message, + uint8_t *out); +size_t datamovement__data_movement_cancel_response__pack_to_buffer + (const Datamovement__DataMovementCancelResponse *message, + ProtobufCBuffer *buffer); +Datamovement__DataMovementCancelResponse * + datamovement__data_movement_cancel_response__unpack + (ProtobufCAllocator *allocator, + size_t len, + const uint8_t *data); +void datamovement__data_movement_cancel_response__free_unpacked + (Datamovement__DataMovementCancelResponse *message, + ProtobufCAllocator *allocator); /* --- per-message closures --- */ +typedef void (*Datamovement__DataMovementVersionResponse_Closure) + (const Datamovement__DataMovementVersionResponse *message, + void *closure_data); +typedef void (*Datamovement__DataMovementWorkflow_Closure) + (const Datamovement__DataMovementWorkflow *message, + void *closure_data); +typedef void (*Datamovement__DataMovementCommandStatus_Closure) + (const Datamovement__DataMovementCommandStatus *message, + void *closure_data); typedef void (*Datamovement__DataMovementCreateRequest_Closure) (const Datamovement__DataMovementCreateRequest *message, void *closure_data); @@ -286,6 +562,18 @@ typedef void (*Datamovement__DataMovementDeleteRequest_Closure) typedef void (*Datamovement__DataMovementDeleteResponse_Closure) (const Datamovement__DataMovementDeleteResponse *message, void *closure_data); +typedef void (*Datamovement__DataMovementListRequest_Closure) + (const Datamovement__DataMovementListRequest *message, + void *closure_data); +typedef void (*Datamovement__DataMovementListResponse_Closure) + (const Datamovement__DataMovementListResponse *message, + void *closure_data); +typedef void (*Datamovement__DataMovementCancelRequest_Closure) + (const Datamovement__DataMovementCancelRequest *message, + void *closure_data); +typedef void (*Datamovement__DataMovementCancelResponse_Closure) + (const Datamovement__DataMovementCancelResponse *message, + void *closure_data); /* --- services --- */ @@ -293,6 +581,10 @@ typedef struct Datamovement__DataMover_Service Datamovement__DataMover_Service; struct Datamovement__DataMover_Service { ProtobufCService base; + void (*version)(Datamovement__DataMover_Service *service, + const Google__Protobuf__Empty *input, + Datamovement__DataMovementVersionResponse_Closure closure, + void *closure_data); void (*create)(Datamovement__DataMover_Service *service, const 
Datamovement__DataMovementCreateRequest *input, Datamovement__DataMovementCreateResponse_Closure closure, @@ -305,6 +597,14 @@ struct Datamovement__DataMover_Service const Datamovement__DataMovementDeleteRequest *input, Datamovement__DataMovementDeleteResponse_Closure closure, void *closure_data); + void (*list)(Datamovement__DataMover_Service *service, + const Datamovement__DataMovementListRequest *input, + Datamovement__DataMovementListResponse_Closure closure, + void *closure_data); + void (*cancel)(Datamovement__DataMover_Service *service, + const Datamovement__DataMovementCancelRequest *input, + Datamovement__DataMovementCancelResponse_Closure closure, + void *closure_data); }; typedef void (*Datamovement__DataMover_ServiceDestroy)(Datamovement__DataMover_Service *); void datamovement__data_mover__init (Datamovement__DataMover_Service *service, @@ -313,9 +613,16 @@ void datamovement__data_mover__init (Datamovement__DataMover_Service *service, { &datamovement__data_mover__descriptor, protobuf_c_service_invoke_internal, NULL } #define DATAMOVEMENT__DATA_MOVER__INIT(function_prefix__) \ { DATAMOVEMENT__DATA_MOVER__BASE_INIT,\ + function_prefix__ ## version,\ function_prefix__ ## create,\ function_prefix__ ## status,\ - function_prefix__ ## delete } + function_prefix__ ## delete,\ + function_prefix__ ## list,\ + function_prefix__ ## cancel } +void datamovement__data_mover__version(ProtobufCService *service, + const Google__Protobuf__Empty *input, + Datamovement__DataMovementVersionResponse_Closure closure, + void *closure_data); void datamovement__data_mover__create(ProtobufCService *service, const Datamovement__DataMovementCreateRequest *input, Datamovement__DataMovementCreateResponse_Closure closure, @@ -328,9 +635,20 @@ void datamovement__data_mover__delete(ProtobufCService *service, const Datamovement__DataMovementDeleteRequest *input, Datamovement__DataMovementDeleteResponse_Closure closure, void *closure_data); +void datamovement__data_mover__list(ProtobufCService *service, + const Datamovement__DataMovementListRequest *input, + Datamovement__DataMovementListResponse_Closure closure, + void *closure_data); +void datamovement__data_mover__cancel(ProtobufCService *service, + const Datamovement__DataMovementCancelRequest *input, + Datamovement__DataMovementCancelResponse_Closure closure, + void *closure_data); /* --- descriptors --- */ +extern const ProtobufCMessageDescriptor datamovement__data_movement_version_response__descriptor; +extern const ProtobufCMessageDescriptor datamovement__data_movement_workflow__descriptor; +extern const ProtobufCMessageDescriptor datamovement__data_movement_command_status__descriptor; extern const ProtobufCMessageDescriptor datamovement__data_movement_create_request__descriptor; extern const ProtobufCMessageDescriptor datamovement__data_movement_create_response__descriptor; extern const ProtobufCEnumDescriptor datamovement__data_movement_create_response__status__descriptor; @@ -341,6 +659,11 @@ extern const ProtobufCEnumDescriptor datamovement__data_movement_status_respo extern const ProtobufCMessageDescriptor datamovement__data_movement_delete_request__descriptor; extern const ProtobufCMessageDescriptor datamovement__data_movement_delete_response__descriptor; extern const ProtobufCEnumDescriptor datamovement__data_movement_delete_response__status__descriptor; +extern const ProtobufCMessageDescriptor datamovement__data_movement_list_request__descriptor; +extern const ProtobufCMessageDescriptor datamovement__data_movement_list_response__descriptor; 
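
Taken together, the declarations above are enough to drive the new RPCs from C. The sketch below is illustrative only and is not part of the patch: the helper names pack_cancel_request and print_version_reply are hypothetical, the transport that carries the packed bytes between compute node and daemon is out of scope, and the output buffer is assumed to be sized by the caller via the __get_packed_size helper. It exercises the two points the generated comments emphasize: every request nests the DataMovementWorkflow identity, and the new Version reply carries a build version plus the list of supported API versions.

```c
#include <stdio.h>
#include "datamovement.pb-c.h"

/* Hypothetical helper: pack a cancel request into caller-provided storage.
 * Every request message must nest the workflow identity; the daemon uses it
 * to match the request against that workflow's data movement operations. */
static size_t pack_cancel_request(const char *name, const char *ns,
                                  const char *uid, uint8_t *out)
{
    Datamovement__DataMovementWorkflow workflow =
        DATAMOVEMENT__DATA_MOVEMENT_WORKFLOW__INIT;
    Datamovement__DataMovementCancelRequest request =
        DATAMOVEMENT__DATA_MOVEMENT_CANCEL_REQUEST__INIT;

    workflow.name = (char *)name;
    workflow.namespace_ = (char *)ns;
    request.workflow = &workflow;
    request.uid = (char *)uid;

    /* Caller sizes 'out' beforehand with
     * datamovement__data_movement_cancel_request__get_packed_size(&request). */
    return datamovement__data_movement_cancel_request__pack(&request, out);
}

/* Hypothetical helper: decode and print a serialized Version reply that was
 * received from the daemon over some transport (not shown). */
static void print_version_reply(const uint8_t *data, size_t len)
{
    Datamovement__DataMovementVersionResponse *resp =
        datamovement__data_movement_version_response__unpack(NULL, len, data);
    if (resp == NULL)
        return;  /* malformed reply */
    printf("daemon version: %s\n", resp->version);
    for (size_t i = 0; i < resp->n_apiversions; i++)
        printf("supports API version: %s\n", resp->apiversions[i]);
    datamovement__data_movement_version_response__free_unpacked(resp, NULL);
}
```

The nested workflow is the same name/namespace pair the new List RPC uses to filter requests, so a client holds on to one DataMovementWorkflow value and reuses it across Create, Status, List, Cancel, and Delete calls.
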
+extern const ProtobufCMessageDescriptor datamovement__data_movement_cancel_request__descriptor; +extern const ProtobufCMessageDescriptor datamovement__data_movement_cancel_response__descriptor; +extern const ProtobufCEnumDescriptor datamovement__data_movement_cancel_response__status__descriptor; extern const ProtobufCServiceDescriptor datamovement__data_mover__descriptor; PROTOBUF_C__END_DECLS diff --git a/daemons/compute/api/datamovement.proto b/daemons/compute/api/datamovement.proto index 561af2d0..37bde678 100644 --- a/daemons/compute/api/datamovement.proto +++ b/daemons/compute/api/datamovement.proto @@ -22,11 +22,17 @@ option java_multiple_files = true; option java_package = "com.hpe.cray.nnf.datamovement"; option java_outer_classname = "DataMovementProto"; +import "google/protobuf/empty.proto"; + package datamovement; // DataMover service definition describes the API for perform data movement // for NNF storage. service DataMover { + // Version sends a request to the data movement daemon and returns a response containing + // details on the current build version and supported API versions. + rpc Version (google.protobuf.Empty) returns (DataMovementVersionResponse) {} + // Create sends a new data movement request identified by source and destination fields. It returns // a response containing a unique identifier which can be used to query the status of the request. rpc Create (DataMovementCreateRequest) returns (DataMovementCreateResponse) {} @@ -47,6 +53,13 @@ service DataMover { rpc Cancel (DataMovementCancelRequest) returns (DataMovementCancelResponse) {} } +// The data movement version response returns information on the running data movement daemon. This includes +// the build version and a list of supported API versions. +message DataMovementVersionResponse { + string version = 1; + repeated string apiVersions = 2; +} + // Data Movement workflow message contains identifying information for the workflow initiating the data movement // operation. This message must be nested in all Request messages. message DataMovementWorkflow { diff --git a/daemons/compute/client-go/api/datamovement.pb.go b/daemons/compute/client-go/api/datamovement.pb.go index 870a04e5..bcf7ffe1 100644 --- a/daemons/compute/client-go/api/datamovement.pb.go +++ b/daemons/compute/client-go/api/datamovement.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.21.6 +// protoc v3.21.12 // source: api/datamovement.proto package api @@ -26,6 +26,7 @@ package api import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" reflect "reflect" sync "sync" ) @@ -83,7 +84,7 @@ func (x DataMovementCreateResponse_Status) Number() protoreflect.EnumNumber { // Deprecated: Use DataMovementCreateResponse_Status.Descriptor instead. func (DataMovementCreateResponse_Status) EnumDescriptor() ([]byte, []int) { - return file_api_datamovement_proto_rawDescGZIP(), []int{3, 0} + return file_api_datamovement_proto_rawDescGZIP(), []int{4, 0} } type DataMovementStatusResponse_State int32 @@ -141,7 +142,7 @@ func (x DataMovementStatusResponse_State) Number() protoreflect.EnumNumber { // Deprecated: Use DataMovementStatusResponse_State.Descriptor instead. 
func (DataMovementStatusResponse_State) EnumDescriptor() ([]byte, []int) { - return file_api_datamovement_proto_rawDescGZIP(), []int{5, 0} + return file_api_datamovement_proto_rawDescGZIP(), []int{6, 0} } type DataMovementStatusResponse_Status int32 @@ -199,7 +200,7 @@ func (x DataMovementStatusResponse_Status) Number() protoreflect.EnumNumber { // Deprecated: Use DataMovementStatusResponse_Status.Descriptor instead. func (DataMovementStatusResponse_Status) EnumDescriptor() ([]byte, []int) { - return file_api_datamovement_proto_rawDescGZIP(), []int{5, 1} + return file_api_datamovement_proto_rawDescGZIP(), []int{6, 1} } type DataMovementDeleteResponse_Status int32 @@ -254,7 +255,7 @@ func (x DataMovementDeleteResponse_Status) Number() protoreflect.EnumNumber { // Deprecated: Use DataMovementDeleteResponse_Status.Descriptor instead. func (DataMovementDeleteResponse_Status) EnumDescriptor() ([]byte, []int) { - return file_api_datamovement_proto_rawDescGZIP(), []int{7, 0} + return file_api_datamovement_proto_rawDescGZIP(), []int{8, 0} } type DataMovementCancelResponse_Status int32 @@ -306,7 +307,64 @@ func (x DataMovementCancelResponse_Status) Number() protoreflect.EnumNumber { // Deprecated: Use DataMovementCancelResponse_Status.Descriptor instead. func (DataMovementCancelResponse_Status) EnumDescriptor() ([]byte, []int) { - return file_api_datamovement_proto_rawDescGZIP(), []int{11, 0} + return file_api_datamovement_proto_rawDescGZIP(), []int{12, 0} +} + +// The data movement version response returns information on the running data movement daemon. This includes +// the build version and a list of supported API versions. +type DataMovementVersionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + ApiVersions []string `protobuf:"bytes,2,rep,name=apiVersions,proto3" json:"apiVersions,omitempty"` +} + +func (x *DataMovementVersionResponse) Reset() { + *x = DataMovementVersionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_api_datamovement_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DataMovementVersionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DataMovementVersionResponse) ProtoMessage() {} + +func (x *DataMovementVersionResponse) ProtoReflect() protoreflect.Message { + mi := &file_api_datamovement_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DataMovementVersionResponse.ProtoReflect.Descriptor instead. 
+func (*DataMovementVersionResponse) Descriptor() ([]byte, []int) { + return file_api_datamovement_proto_rawDescGZIP(), []int{0} +} + +func (x *DataMovementVersionResponse) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *DataMovementVersionResponse) GetApiVersions() []string { + if x != nil { + return x.ApiVersions + } + return nil } // Data Movement workflow message contains identifying information for the workflow initiating the data movement @@ -323,7 +381,7 @@ type DataMovementWorkflow struct { func (x *DataMovementWorkflow) Reset() { *x = DataMovementWorkflow{} if protoimpl.UnsafeEnabled { - mi := &file_api_datamovement_proto_msgTypes[0] + mi := &file_api_datamovement_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -336,7 +394,7 @@ func (x *DataMovementWorkflow) String() string { func (*DataMovementWorkflow) ProtoMessage() {} func (x *DataMovementWorkflow) ProtoReflect() protoreflect.Message { - mi := &file_api_datamovement_proto_msgTypes[0] + mi := &file_api_datamovement_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -349,7 +407,7 @@ func (x *DataMovementWorkflow) ProtoReflect() protoreflect.Message { // Deprecated: Use DataMovementWorkflow.ProtoReflect.Descriptor instead. func (*DataMovementWorkflow) Descriptor() ([]byte, []int) { - return file_api_datamovement_proto_rawDescGZIP(), []int{0} + return file_api_datamovement_proto_rawDescGZIP(), []int{1} } func (x *DataMovementWorkflow) GetName() string { @@ -385,7 +443,7 @@ type DataMovementCommandStatus struct { func (x *DataMovementCommandStatus) Reset() { *x = DataMovementCommandStatus{} if protoimpl.UnsafeEnabled { - mi := &file_api_datamovement_proto_msgTypes[1] + mi := &file_api_datamovement_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -398,7 +456,7 @@ func (x *DataMovementCommandStatus) String() string { func (*DataMovementCommandStatus) ProtoMessage() {} func (x *DataMovementCommandStatus) ProtoReflect() protoreflect.Message { - mi := &file_api_datamovement_proto_msgTypes[1] + mi := &file_api_datamovement_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -411,7 +469,7 @@ func (x *DataMovementCommandStatus) ProtoReflect() protoreflect.Message { // Deprecated: Use DataMovementCommandStatus.ProtoReflect.Descriptor instead. 
func (*DataMovementCommandStatus) Descriptor() ([]byte, []int) { - return file_api_datamovement_proto_rawDescGZIP(), []int{1} + return file_api_datamovement_proto_rawDescGZIP(), []int{2} } func (x *DataMovementCommandStatus) GetCommand() string { @@ -466,7 +524,7 @@ type DataMovementCreateRequest struct { func (x *DataMovementCreateRequest) Reset() { *x = DataMovementCreateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_api_datamovement_proto_msgTypes[2] + mi := &file_api_datamovement_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -479,7 +537,7 @@ func (x *DataMovementCreateRequest) String() string { func (*DataMovementCreateRequest) ProtoMessage() {} func (x *DataMovementCreateRequest) ProtoReflect() protoreflect.Message { - mi := &file_api_datamovement_proto_msgTypes[2] + mi := &file_api_datamovement_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -492,7 +550,7 @@ func (x *DataMovementCreateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DataMovementCreateRequest.ProtoReflect.Descriptor instead. func (*DataMovementCreateRequest) Descriptor() ([]byte, []int) { - return file_api_datamovement_proto_rawDescGZIP(), []int{2} + return file_api_datamovement_proto_rawDescGZIP(), []int{3} } func (x *DataMovementCreateRequest) GetWorkflow() *DataMovementWorkflow { @@ -539,7 +597,7 @@ type DataMovementCreateResponse struct { func (x *DataMovementCreateResponse) Reset() { *x = DataMovementCreateResponse{} if protoimpl.UnsafeEnabled { - mi := &file_api_datamovement_proto_msgTypes[3] + mi := &file_api_datamovement_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -552,7 +610,7 @@ func (x *DataMovementCreateResponse) String() string { func (*DataMovementCreateResponse) ProtoMessage() {} func (x *DataMovementCreateResponse) ProtoReflect() protoreflect.Message { - mi := &file_api_datamovement_proto_msgTypes[3] + mi := &file_api_datamovement_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -565,7 +623,7 @@ func (x *DataMovementCreateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DataMovementCreateResponse.ProtoReflect.Descriptor instead. 
func (*DataMovementCreateResponse) Descriptor() ([]byte, []int) { - return file_api_datamovement_proto_rawDescGZIP(), []int{3} + return file_api_datamovement_proto_rawDescGZIP(), []int{4} } func (x *DataMovementCreateResponse) GetUid() string { @@ -604,7 +662,7 @@ type DataMovementStatusRequest struct { func (x *DataMovementStatusRequest) Reset() { *x = DataMovementStatusRequest{} if protoimpl.UnsafeEnabled { - mi := &file_api_datamovement_proto_msgTypes[4] + mi := &file_api_datamovement_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -617,7 +675,7 @@ func (x *DataMovementStatusRequest) String() string { func (*DataMovementStatusRequest) ProtoMessage() {} func (x *DataMovementStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_api_datamovement_proto_msgTypes[4] + mi := &file_api_datamovement_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -630,7 +688,7 @@ func (x *DataMovementStatusRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DataMovementStatusRequest.ProtoReflect.Descriptor instead. func (*DataMovementStatusRequest) Descriptor() ([]byte, []int) { - return file_api_datamovement_proto_rawDescGZIP(), []int{4} + return file_api_datamovement_proto_rawDescGZIP(), []int{5} } func (x *DataMovementStatusRequest) GetWorkflow() *DataMovementWorkflow { @@ -671,7 +729,7 @@ type DataMovementStatusResponse struct { func (x *DataMovementStatusResponse) Reset() { *x = DataMovementStatusResponse{} if protoimpl.UnsafeEnabled { - mi := &file_api_datamovement_proto_msgTypes[5] + mi := &file_api_datamovement_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -684,7 +742,7 @@ func (x *DataMovementStatusResponse) String() string { func (*DataMovementStatusResponse) ProtoMessage() {} func (x *DataMovementStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_api_datamovement_proto_msgTypes[5] + mi := &file_api_datamovement_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -697,7 +755,7 @@ func (x *DataMovementStatusResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DataMovementStatusResponse.ProtoReflect.Descriptor instead. 
func (*DataMovementStatusResponse) Descriptor() ([]byte, []int) { - return file_api_datamovement_proto_rawDescGZIP(), []int{5} + return file_api_datamovement_proto_rawDescGZIP(), []int{6} } func (x *DataMovementStatusResponse) GetState() DataMovementStatusResponse_State { @@ -742,7 +800,7 @@ type DataMovementDeleteRequest struct { func (x *DataMovementDeleteRequest) Reset() { *x = DataMovementDeleteRequest{} if protoimpl.UnsafeEnabled { - mi := &file_api_datamovement_proto_msgTypes[6] + mi := &file_api_datamovement_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -755,7 +813,7 @@ func (x *DataMovementDeleteRequest) String() string { func (*DataMovementDeleteRequest) ProtoMessage() {} func (x *DataMovementDeleteRequest) ProtoReflect() protoreflect.Message { - mi := &file_api_datamovement_proto_msgTypes[6] + mi := &file_api_datamovement_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -768,7 +826,7 @@ func (x *DataMovementDeleteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DataMovementDeleteRequest.ProtoReflect.Descriptor instead. func (*DataMovementDeleteRequest) Descriptor() ([]byte, []int) { - return file_api_datamovement_proto_rawDescGZIP(), []int{6} + return file_api_datamovement_proto_rawDescGZIP(), []int{7} } func (x *DataMovementDeleteRequest) GetWorkflow() *DataMovementWorkflow { @@ -798,7 +856,7 @@ type DataMovementDeleteResponse struct { func (x *DataMovementDeleteResponse) Reset() { *x = DataMovementDeleteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_api_datamovement_proto_msgTypes[7] + mi := &file_api_datamovement_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -811,7 +869,7 @@ func (x *DataMovementDeleteResponse) String() string { func (*DataMovementDeleteResponse) ProtoMessage() {} func (x *DataMovementDeleteResponse) ProtoReflect() protoreflect.Message { - mi := &file_api_datamovement_proto_msgTypes[7] + mi := &file_api_datamovement_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -824,7 +882,7 @@ func (x *DataMovementDeleteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DataMovementDeleteResponse.ProtoReflect.Descriptor instead. 
func (*DataMovementDeleteResponse) Descriptor() ([]byte, []int) { - return file_api_datamovement_proto_rawDescGZIP(), []int{7} + return file_api_datamovement_proto_rawDescGZIP(), []int{8} } func (x *DataMovementDeleteResponse) GetStatus() DataMovementDeleteResponse_Status { @@ -855,7 +913,7 @@ type DataMovementListRequest struct { func (x *DataMovementListRequest) Reset() { *x = DataMovementListRequest{} if protoimpl.UnsafeEnabled { - mi := &file_api_datamovement_proto_msgTypes[8] + mi := &file_api_datamovement_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -868,7 +926,7 @@ func (x *DataMovementListRequest) String() string { func (*DataMovementListRequest) ProtoMessage() {} func (x *DataMovementListRequest) ProtoReflect() protoreflect.Message { - mi := &file_api_datamovement_proto_msgTypes[8] + mi := &file_api_datamovement_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -881,7 +939,7 @@ func (x *DataMovementListRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DataMovementListRequest.ProtoReflect.Descriptor instead. func (*DataMovementListRequest) Descriptor() ([]byte, []int) { - return file_api_datamovement_proto_rawDescGZIP(), []int{8} + return file_api_datamovement_proto_rawDescGZIP(), []int{9} } func (x *DataMovementListRequest) GetWorkflow() *DataMovementWorkflow { @@ -904,7 +962,7 @@ type DataMovementListResponse struct { func (x *DataMovementListResponse) Reset() { *x = DataMovementListResponse{} if protoimpl.UnsafeEnabled { - mi := &file_api_datamovement_proto_msgTypes[9] + mi := &file_api_datamovement_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -917,7 +975,7 @@ func (x *DataMovementListResponse) String() string { func (*DataMovementListResponse) ProtoMessage() {} func (x *DataMovementListResponse) ProtoReflect() protoreflect.Message { - mi := &file_api_datamovement_proto_msgTypes[9] + mi := &file_api_datamovement_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -930,7 +988,7 @@ func (x *DataMovementListResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DataMovementListResponse.ProtoReflect.Descriptor instead. 
func (*DataMovementListResponse) Descriptor() ([]byte, []int) { - return file_api_datamovement_proto_rawDescGZIP(), []int{9} + return file_api_datamovement_proto_rawDescGZIP(), []int{10} } func (x *DataMovementListResponse) GetUids() []string { @@ -954,7 +1012,7 @@ type DataMovementCancelRequest struct { func (x *DataMovementCancelRequest) Reset() { *x = DataMovementCancelRequest{} if protoimpl.UnsafeEnabled { - mi := &file_api_datamovement_proto_msgTypes[10] + mi := &file_api_datamovement_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -967,7 +1025,7 @@ func (x *DataMovementCancelRequest) String() string { func (*DataMovementCancelRequest) ProtoMessage() {} func (x *DataMovementCancelRequest) ProtoReflect() protoreflect.Message { - mi := &file_api_datamovement_proto_msgTypes[10] + mi := &file_api_datamovement_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -980,7 +1038,7 @@ func (x *DataMovementCancelRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DataMovementCancelRequest.ProtoReflect.Descriptor instead. func (*DataMovementCancelRequest) Descriptor() ([]byte, []int) { - return file_api_datamovement_proto_rawDescGZIP(), []int{10} + return file_api_datamovement_proto_rawDescGZIP(), []int{11} } func (x *DataMovementCancelRequest) GetWorkflow() *DataMovementWorkflow { @@ -1013,7 +1071,7 @@ type DataMovementCancelResponse struct { func (x *DataMovementCancelResponse) Reset() { *x = DataMovementCancelResponse{} if protoimpl.UnsafeEnabled { - mi := &file_api_datamovement_proto_msgTypes[11] + mi := &file_api_datamovement_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1026,7 +1084,7 @@ func (x *DataMovementCancelResponse) String() string { func (*DataMovementCancelResponse) ProtoMessage() {} func (x *DataMovementCancelResponse) ProtoReflect() protoreflect.Message { - mi := &file_api_datamovement_proto_msgTypes[11] + mi := &file_api_datamovement_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1039,7 +1097,7 @@ func (x *DataMovementCancelResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DataMovementCancelResponse.ProtoReflect.Descriptor instead. 
func (*DataMovementCancelResponse) Descriptor() ([]byte, []int) { - return file_api_datamovement_proto_rawDescGZIP(), []int{11} + return file_api_datamovement_proto_rawDescGZIP(), []int{12} } func (x *DataMovementCancelResponse) GetStatus() DataMovementCancelResponse_Status { @@ -1061,169 +1119,182 @@ var File_api_datamovement_proto protoreflect.FileDescriptor var file_api_datamovement_proto_rawDesc = []byte{ 0x0a, 0x16, 0x61, 0x70, 0x69, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, - 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x22, 0x48, 0x0a, 0x14, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, - 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x22, 0xbf, 0x01, 0x0a, 0x19, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, - 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x67, - 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x67, - 0x72, 0x65, 0x73, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x65, 0x6c, 0x61, 0x70, 0x73, 0x65, 0x64, 0x54, - 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x65, 0x6c, 0x61, 0x70, 0x73, - 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6c, 0x61, 0x73, - 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x6c, 0x61, 0x73, 0x74, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0f, 0x6c, 0x61, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x69, - 0x6d, 0x65, 0x22, 0xad, 0x01, 0x0a, 0x19, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x3e, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x72, - 0x79, 0x72, 0x75, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x72, - 0x75, 0x6e, 0x22, 0xc1, 0x01, 0x0a, 0x1a, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 
0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x75, 0x69, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x2e, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x0a, 0x0a, - 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, - 0x41, 0x4c, 0x49, 0x44, 0x10, 0x02, 0x22, 0x8f, 0x01, 0x0a, 0x19, 0x44, 0x61, 0x74, 0x61, 0x4d, - 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, + 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x59, 0x0a, 0x1b, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, + 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0b, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x48, + 0x0a, 0x14, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0xbf, 0x01, 0x0a, 0x19, 0x44, 0x61, 0x74, + 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, + 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x20, 0x0a, 0x0b, + 0x65, 0x6c, 0x61, 0x70, 0x73, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x65, 0x6c, 0x61, 0x70, 0x73, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x20, + 0x0a, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x12, 0x28, 0x0a, 0x0f, 0x6c, 0x61, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x61, 0x73, 0x74, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 
0x65, 0x54, 0x69, 0x6d, 0x65, 0x22, 0xad, 0x01, 0x0a, 0x19, 0x44, + 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x61, 0x74, + 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, + 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x08, + 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x72, 0x79, 0x72, 0x75, 0x6e, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x72, 0x75, 0x6e, 0x22, 0xc1, 0x01, 0x0a, 0x1a, 0x44, + 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x64, 0x61, + 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, + 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x2e, + 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, + 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, + 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x02, 0x22, 0x8f, + 0x01, 0x0a, 0x19, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x08, + 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, + 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x10, 0x0a, 0x03, + 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x20, + 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x57, 0x61, 0x69, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x57, 0x61, 0x69, 0x74, 0x54, 0x69, 0x6d, 0x65, + 0x22, 0xd9, 0x03, 0x0a, 0x1a, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x44, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, + 0x2e, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, 
0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, + 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x47, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x4d, 0x0a, 0x0d, 0x63, 0x6f, 0x6d, 0x6d, + 0x61, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x27, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, + 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x61, + 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0d, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, + 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x61, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x0c, 0x0a, + 0x08, 0x53, 0x54, 0x41, 0x52, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x52, + 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x4d, 0x50, + 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0e, 0x0a, 0x0a, 0x43, 0x41, 0x4e, 0x43, 0x45, + 0x4c, 0x4c, 0x49, 0x4e, 0x47, 0x10, 0x04, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, + 0x57, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x10, 0x05, 0x22, 0x60, 0x0a, 0x06, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, + 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, + 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, + 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x41, 0x4e, + 0x43, 0x45, 0x4c, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x12, 0x0a, 0x0e, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x10, 0x05, 0x22, 0x6d, 0x0a, 0x19, + 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x08, 0x77, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x61, + 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, + 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, + 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x69, 0x64, 0x22, 0xcb, 0x01, 0x0a, 0x1a, + 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 
0x64, 0x61, 0x74, + 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, + 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x4a, 0x0a, + 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, + 0x49, 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, + 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x02, + 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x04, 0x22, 0x59, 0x0a, 0x17, 0x44, 0x61, 0x74, + 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x57, 0x61, 0x69, - 0x74, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x6d, 0x61, 0x78, - 0x57, 0x61, 0x69, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x22, 0xd9, 0x03, 0x0a, 0x1a, 0x44, 0x61, 0x74, - 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x47, 0x0a, - 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, - 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x74, - 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x12, 0x4d, 0x0a, 0x0d, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, - 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x0d, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, - 0x61, 0x0a, 
0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x45, 0x4e, 0x44, - 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x41, 0x52, 0x54, 0x49, 0x4e, - 0x47, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, - 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, - 0x0e, 0x0a, 0x0a, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x4c, 0x49, 0x4e, 0x47, 0x10, 0x04, 0x12, - 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, - 0x10, 0x05, 0x22, 0x60, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, - 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, - 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, - 0x45, 0x53, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, - 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x4c, 0x45, 0x44, 0x10, 0x04, - 0x12, 0x12, 0x0a, 0x0e, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, - 0x55, 0x53, 0x10, 0x05, 0x22, 0x6d, 0x0a, 0x19, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x66, 0x6c, 0x6f, 0x77, 0x22, 0x2e, 0x0a, 0x18, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x75, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, + 0x75, 0x69, 0x64, 0x73, 0x22, 0x6d, 0x0a, 0x19, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x75, 0x69, 0x64, 0x22, 0xcb, 0x01, 0x0a, 0x1a, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x75, 0x69, 0x64, 0x22, 0xbe, 0x01, 0x0a, 0x1a, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x61, + 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x4a, 0x0a, 0x06, 
0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x3d, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, - 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, - 0x56, 0x45, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x04, 0x22, 0x59, 0x0a, 0x17, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x08, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, - 0x2e, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, - 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x22, 0x2e, 0x0a, 0x18, - 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x69, 0x64, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x75, 0x69, 0x64, 0x73, 0x22, 0x6d, 0x0a, 0x19, - 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x6e, 0x63, - 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x08, 0x77, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x61, - 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, - 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, - 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x69, 0x64, 0x22, 0xbe, 0x01, 0x0a, 0x1a, - 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x6e, 0x63, - 0x65, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x06, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x64, 0x61, 0x74, - 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, - 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x3d, 0x0a, - 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, - 0x49, 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, - 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x02, - 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x03, 0x32, 0xe0, 0x03, 0x0a, - 0x09, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x72, 0x12, 0x5d, 0x0a, 0x06, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x12, 0x27, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 
0x6e, 0x74, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, - 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x74, - 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x06, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x27, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x64, + 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, + 0x45, 0x44, 0x10, 0x03, 0x32, 0xb0, 0x04, 0x0a, 0x09, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, + 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x29, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x06, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x27, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, - 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x12, 0x27, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x64, 0x61, + 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x5d, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x27, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, - 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, - 0x25, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, - 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x5d, 0x0a, 0x06, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x12, 0x27, 0x2e, 0x64, 0x61, 0x74, + 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 
0x73, 0x74, 0x1a, 0x28, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x5d, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x27, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, - 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, + 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, - 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, - 0x57, 0x0a, 0x1d, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x70, 0x65, 0x2e, 0x63, 0x72, 0x61, 0x79, 0x2e, - 0x6e, 0x6e, 0x66, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x42, 0x11, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x21, 0x6e, 0x6e, 0x66, 0x2e, 0x63, 0x72, 0x61, 0x79, 0x2e, - 0x68, 0x70, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x61, 0x70, 0x69, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x57, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x25, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, + 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, + 0x2e, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, + 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x06, 0x43, 0x61, 0x6e, 0x63, + 0x65, 0x6c, 0x12, 0x27, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x61, + 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x64, 0x61, + 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, + 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x57, 0x0a, 0x1d, 0x63, 0x6f, 0x6d, 0x2e, 0x68, + 0x70, 0x65, 0x2e, 0x63, 0x72, 0x61, 0x79, 0x2e, 0x6e, 0x6e, 0x66, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x6d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x11, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, + 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x21, 0x6e, + 0x6e, 0x66, 0x2e, 0x63, 0x72, 0x61, 0x79, 0x2e, 0x68, 0x70, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x64, 0x61, 0x74, 0x61, 0x6d, 0x6f, 0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x61, 0x70, 0x69, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1239,50 +1310,54 @@ func file_api_datamovement_proto_rawDescGZIP() []byte { } 
var file_api_datamovement_proto_enumTypes = make([]protoimpl.EnumInfo, 5) -var file_api_datamovement_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_api_datamovement_proto_msgTypes = make([]protoimpl.MessageInfo, 13) var file_api_datamovement_proto_goTypes = []interface{}{ (DataMovementCreateResponse_Status)(0), // 0: datamovement.DataMovementCreateResponse.Status (DataMovementStatusResponse_State)(0), // 1: datamovement.DataMovementStatusResponse.State (DataMovementStatusResponse_Status)(0), // 2: datamovement.DataMovementStatusResponse.Status (DataMovementDeleteResponse_Status)(0), // 3: datamovement.DataMovementDeleteResponse.Status (DataMovementCancelResponse_Status)(0), // 4: datamovement.DataMovementCancelResponse.Status - (*DataMovementWorkflow)(nil), // 5: datamovement.DataMovementWorkflow - (*DataMovementCommandStatus)(nil), // 6: datamovement.DataMovementCommandStatus - (*DataMovementCreateRequest)(nil), // 7: datamovement.DataMovementCreateRequest - (*DataMovementCreateResponse)(nil), // 8: datamovement.DataMovementCreateResponse - (*DataMovementStatusRequest)(nil), // 9: datamovement.DataMovementStatusRequest - (*DataMovementStatusResponse)(nil), // 10: datamovement.DataMovementStatusResponse - (*DataMovementDeleteRequest)(nil), // 11: datamovement.DataMovementDeleteRequest - (*DataMovementDeleteResponse)(nil), // 12: datamovement.DataMovementDeleteResponse - (*DataMovementListRequest)(nil), // 13: datamovement.DataMovementListRequest - (*DataMovementListResponse)(nil), // 14: datamovement.DataMovementListResponse - (*DataMovementCancelRequest)(nil), // 15: datamovement.DataMovementCancelRequest - (*DataMovementCancelResponse)(nil), // 16: datamovement.DataMovementCancelResponse + (*DataMovementVersionResponse)(nil), // 5: datamovement.DataMovementVersionResponse + (*DataMovementWorkflow)(nil), // 6: datamovement.DataMovementWorkflow + (*DataMovementCommandStatus)(nil), // 7: datamovement.DataMovementCommandStatus + (*DataMovementCreateRequest)(nil), // 8: datamovement.DataMovementCreateRequest + (*DataMovementCreateResponse)(nil), // 9: datamovement.DataMovementCreateResponse + (*DataMovementStatusRequest)(nil), // 10: datamovement.DataMovementStatusRequest + (*DataMovementStatusResponse)(nil), // 11: datamovement.DataMovementStatusResponse + (*DataMovementDeleteRequest)(nil), // 12: datamovement.DataMovementDeleteRequest + (*DataMovementDeleteResponse)(nil), // 13: datamovement.DataMovementDeleteResponse + (*DataMovementListRequest)(nil), // 14: datamovement.DataMovementListRequest + (*DataMovementListResponse)(nil), // 15: datamovement.DataMovementListResponse + (*DataMovementCancelRequest)(nil), // 16: datamovement.DataMovementCancelRequest + (*DataMovementCancelResponse)(nil), // 17: datamovement.DataMovementCancelResponse + (*emptypb.Empty)(nil), // 18: google.protobuf.Empty } var file_api_datamovement_proto_depIdxs = []int32{ - 5, // 0: datamovement.DataMovementCreateRequest.workflow:type_name -> datamovement.DataMovementWorkflow + 6, // 0: datamovement.DataMovementCreateRequest.workflow:type_name -> datamovement.DataMovementWorkflow 0, // 1: datamovement.DataMovementCreateResponse.status:type_name -> datamovement.DataMovementCreateResponse.Status - 5, // 2: datamovement.DataMovementStatusRequest.workflow:type_name -> datamovement.DataMovementWorkflow + 6, // 2: datamovement.DataMovementStatusRequest.workflow:type_name -> datamovement.DataMovementWorkflow 1, // 3: datamovement.DataMovementStatusResponse.state:type_name -> 
datamovement.DataMovementStatusResponse.State 2, // 4: datamovement.DataMovementStatusResponse.status:type_name -> datamovement.DataMovementStatusResponse.Status - 6, // 5: datamovement.DataMovementStatusResponse.commandStatus:type_name -> datamovement.DataMovementCommandStatus - 5, // 6: datamovement.DataMovementDeleteRequest.workflow:type_name -> datamovement.DataMovementWorkflow + 7, // 5: datamovement.DataMovementStatusResponse.commandStatus:type_name -> datamovement.DataMovementCommandStatus + 6, // 6: datamovement.DataMovementDeleteRequest.workflow:type_name -> datamovement.DataMovementWorkflow 3, // 7: datamovement.DataMovementDeleteResponse.status:type_name -> datamovement.DataMovementDeleteResponse.Status - 5, // 8: datamovement.DataMovementListRequest.workflow:type_name -> datamovement.DataMovementWorkflow - 5, // 9: datamovement.DataMovementCancelRequest.workflow:type_name -> datamovement.DataMovementWorkflow + 6, // 8: datamovement.DataMovementListRequest.workflow:type_name -> datamovement.DataMovementWorkflow + 6, // 9: datamovement.DataMovementCancelRequest.workflow:type_name -> datamovement.DataMovementWorkflow 4, // 10: datamovement.DataMovementCancelResponse.status:type_name -> datamovement.DataMovementCancelResponse.Status - 7, // 11: datamovement.DataMover.Create:input_type -> datamovement.DataMovementCreateRequest - 9, // 12: datamovement.DataMover.Status:input_type -> datamovement.DataMovementStatusRequest - 11, // 13: datamovement.DataMover.Delete:input_type -> datamovement.DataMovementDeleteRequest - 13, // 14: datamovement.DataMover.List:input_type -> datamovement.DataMovementListRequest - 15, // 15: datamovement.DataMover.Cancel:input_type -> datamovement.DataMovementCancelRequest - 8, // 16: datamovement.DataMover.Create:output_type -> datamovement.DataMovementCreateResponse - 10, // 17: datamovement.DataMover.Status:output_type -> datamovement.DataMovementStatusResponse - 12, // 18: datamovement.DataMover.Delete:output_type -> datamovement.DataMovementDeleteResponse - 14, // 19: datamovement.DataMover.List:output_type -> datamovement.DataMovementListResponse - 16, // 20: datamovement.DataMover.Cancel:output_type -> datamovement.DataMovementCancelResponse - 16, // [16:21] is the sub-list for method output_type - 11, // [11:16] is the sub-list for method input_type + 18, // 11: datamovement.DataMover.Version:input_type -> google.protobuf.Empty + 8, // 12: datamovement.DataMover.Create:input_type -> datamovement.DataMovementCreateRequest + 10, // 13: datamovement.DataMover.Status:input_type -> datamovement.DataMovementStatusRequest + 12, // 14: datamovement.DataMover.Delete:input_type -> datamovement.DataMovementDeleteRequest + 14, // 15: datamovement.DataMover.List:input_type -> datamovement.DataMovementListRequest + 16, // 16: datamovement.DataMover.Cancel:input_type -> datamovement.DataMovementCancelRequest + 5, // 17: datamovement.DataMover.Version:output_type -> datamovement.DataMovementVersionResponse + 9, // 18: datamovement.DataMover.Create:output_type -> datamovement.DataMovementCreateResponse + 11, // 19: datamovement.DataMover.Status:output_type -> datamovement.DataMovementStatusResponse + 13, // 20: datamovement.DataMover.Delete:output_type -> datamovement.DataMovementDeleteResponse + 15, // 21: datamovement.DataMover.List:output_type -> datamovement.DataMovementListResponse + 17, // 22: datamovement.DataMover.Cancel:output_type -> datamovement.DataMovementCancelResponse + 17, // [17:23] is the sub-list for method output_type + 11, // [11:17] is the 
sub-list for method input_type 11, // [11:11] is the sub-list for extension type_name 11, // [11:11] is the sub-list for extension extendee 0, // [0:11] is the sub-list for field type_name @@ -1295,7 +1370,7 @@ func file_api_datamovement_proto_init() { } if !protoimpl.UnsafeEnabled { file_api_datamovement_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DataMovementWorkflow); i { + switch v := v.(*DataMovementVersionResponse); i { case 0: return &v.state case 1: @@ -1307,7 +1382,7 @@ func file_api_datamovement_proto_init() { } } file_api_datamovement_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DataMovementCommandStatus); i { + switch v := v.(*DataMovementWorkflow); i { case 0: return &v.state case 1: @@ -1319,7 +1394,7 @@ func file_api_datamovement_proto_init() { } } file_api_datamovement_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DataMovementCreateRequest); i { + switch v := v.(*DataMovementCommandStatus); i { case 0: return &v.state case 1: @@ -1331,7 +1406,7 @@ func file_api_datamovement_proto_init() { } } file_api_datamovement_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DataMovementCreateResponse); i { + switch v := v.(*DataMovementCreateRequest); i { case 0: return &v.state case 1: @@ -1343,7 +1418,7 @@ func file_api_datamovement_proto_init() { } } file_api_datamovement_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DataMovementStatusRequest); i { + switch v := v.(*DataMovementCreateResponse); i { case 0: return &v.state case 1: @@ -1355,7 +1430,7 @@ func file_api_datamovement_proto_init() { } } file_api_datamovement_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DataMovementStatusResponse); i { + switch v := v.(*DataMovementStatusRequest); i { case 0: return &v.state case 1: @@ -1367,7 +1442,7 @@ func file_api_datamovement_proto_init() { } } file_api_datamovement_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DataMovementDeleteRequest); i { + switch v := v.(*DataMovementStatusResponse); i { case 0: return &v.state case 1: @@ -1379,7 +1454,7 @@ func file_api_datamovement_proto_init() { } } file_api_datamovement_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DataMovementDeleteResponse); i { + switch v := v.(*DataMovementDeleteRequest); i { case 0: return &v.state case 1: @@ -1391,7 +1466,7 @@ func file_api_datamovement_proto_init() { } } file_api_datamovement_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DataMovementListRequest); i { + switch v := v.(*DataMovementDeleteResponse); i { case 0: return &v.state case 1: @@ -1403,7 +1478,7 @@ func file_api_datamovement_proto_init() { } } file_api_datamovement_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DataMovementListResponse); i { + switch v := v.(*DataMovementListRequest); i { case 0: return &v.state case 1: @@ -1415,7 +1490,7 @@ func file_api_datamovement_proto_init() { } } file_api_datamovement_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DataMovementCancelRequest); i { + switch v := v.(*DataMovementListResponse); i { case 0: return &v.state case 1: @@ -1427,6 +1502,18 @@ func file_api_datamovement_proto_init() { } } file_api_datamovement_proto_msgTypes[11].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*DataMovementCancelRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_datamovement_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DataMovementCancelResponse); i { case 0: return &v.state @@ -1445,7 +1532,7 @@ func file_api_datamovement_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_api_datamovement_proto_rawDesc, NumEnums: 5, - NumMessages: 12, + NumMessages: 13, NumExtensions: 0, NumServices: 1, }, diff --git a/daemons/compute/client-go/api/datamovement_grpc.pb.go b/daemons/compute/client-go/api/datamovement_grpc.pb.go index 28ce7778..94a4c6b0 100644 --- a/daemons/compute/client-go/api/datamovement_grpc.pb.go +++ b/daemons/compute/client-go/api/datamovement_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 -// - protoc v3.21.6 +// - protoc v3.21.12 // source: api/datamovement.proto package api @@ -11,6 +11,7 @@ import ( grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" ) // This is a compile-time assertion to ensure that this generated file @@ -22,6 +23,9 @@ const _ = grpc.SupportPackageIsVersion7 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type DataMoverClient interface { + // Version sends a request to the data movement daemon and returns a response containing + // details on the current build version and supported API versions. + Version(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*DataMovementVersionResponse, error) // Create sends a new data movement request identified by source and destination fields. It returns // a response containing a unique identifier which can be used to query the status of the request. Create(ctx context.Context, in *DataMovementCreateRequest, opts ...grpc.CallOption) (*DataMovementCreateResponse, error) @@ -46,6 +50,15 @@ func NewDataMoverClient(cc grpc.ClientConnInterface) DataMoverClient { return &dataMoverClient{cc} } +func (c *dataMoverClient) Version(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*DataMovementVersionResponse, error) { + out := new(DataMovementVersionResponse) + err := c.cc.Invoke(ctx, "/datamovement.DataMover/Version", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *dataMoverClient) Create(ctx context.Context, in *DataMovementCreateRequest, opts ...grpc.CallOption) (*DataMovementCreateResponse, error) { out := new(DataMovementCreateResponse) err := c.cc.Invoke(ctx, "/datamovement.DataMover/Create", in, out, opts...) @@ -95,6 +108,9 @@ func (c *dataMoverClient) Cancel(ctx context.Context, in *DataMovementCancelRequ // All implementations must embed UnimplementedDataMoverServer // for forward compatibility type DataMoverServer interface { + // Version sends a request to the data movement daemon and returns a response containing + // details on the current build version and supported API versions. + Version(context.Context, *emptypb.Empty) (*DataMovementVersionResponse, error) // Create sends a new data movement request identified by source and destination fields. 
It returns // a response containing a unique identifier which can be used to query the status of the request. Create(context.Context, *DataMovementCreateRequest) (*DataMovementCreateResponse, error) @@ -116,6 +132,9 @@ type DataMoverServer interface { type UnimplementedDataMoverServer struct { } +func (UnimplementedDataMoverServer) Version(context.Context, *emptypb.Empty) (*DataMovementVersionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Version not implemented") +} func (UnimplementedDataMoverServer) Create(context.Context, *DataMovementCreateRequest) (*DataMovementCreateResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") } @@ -144,6 +163,24 @@ func RegisterDataMoverServer(s grpc.ServiceRegistrar, srv DataMoverServer) { s.RegisterService(&DataMover_ServiceDesc, srv) } +func _DataMover_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DataMoverServer).Version(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/datamovement.DataMover/Version", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DataMoverServer).Version(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + func _DataMover_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(DataMovementCreateRequest) if err := dec(in); err != nil { @@ -241,6 +278,10 @@ var DataMover_ServiceDesc = grpc.ServiceDesc{ ServiceName: "datamovement.DataMover", HandlerType: (*DataMoverServer)(nil), Methods: []grpc.MethodDesc{ + { + MethodName: "Version", + Handler: _DataMover_Version_Handler, + }, { MethodName: "Create", Handler: _DataMover_Create_Handler, diff --git a/daemons/compute/client-go/main.go b/daemons/compute/client-go/main.go index d9dd1c75..cdd8ecad 100644 --- a/daemons/compute/client-go/main.go +++ b/daemons/compute/client-go/main.go @@ -28,6 +28,7 @@ import ( "time" "google.golang.org/grpc" + "google.golang.org/protobuf/types/known/emptypb" pb "github.com/NearNodeFlash/nnf-dm/daemons/compute/client-go/api" ) @@ -47,19 +48,6 @@ func main() { flag.Parse() - if len(*workflow) == 0 { - log.Printf("workflow name required") - os.Exit(1) - } - if len(*source) == 0 || len(*destination) == 0 { - log.Printf("source and destination required") - os.Exit(1) - } - if *count <= 0 { - log.Printf("count must be >= 1") - os.Exit(1) - } - socketAddr := "unix://" + *socket log.Printf("Connecting to %s", socketAddr) @@ -74,6 +62,27 @@ func main() { c := pb.NewDataMoverClient(conn) + versionResponse, err := versionRequest(ctx, c) + if err != nil { + log.Fatalf("could not retrieve version information: %v", err) + } + + log.Printf("Data Mover Version: %s\n", versionResponse.GetVersion()) + log.Printf("Data Mover Supported API Versions: %v\n", versionResponse.GetApiVersions()) + + if len(*workflow) == 0 { + log.Printf("workflow name required") + os.Exit(1) + } + if len(*source) == 0 || len(*destination) == 0 { + log.Printf("source and destination required") + os.Exit(1) + } + if *count <= 0 { + log.Printf("count must be >= 1") + os.Exit(1) + } + for i := 0; i < *count; i++ { log.Printf("Creating request %d of %d...", i+1, *count) @@ -161,6 +170,16 @@ func main() { } } 
+func versionRequest(ctx context.Context, client pb.DataMoverClient) (*pb.DataMovementVersionResponse, error) { + rsp, err := client.Version(ctx, &emptypb.Empty{}) + + if err != nil { + return nil, err + } + + return rsp, nil +} + func createRequest(ctx context.Context, client pb.DataMoverClient, workflow, namespace, source, destination string, dryrun bool) (*pb.DataMovementCreateResponse, error) { rsp, err := client.Create(ctx, &pb.DataMovementCreateRequest{ diff --git a/daemons/compute/client-py/datamovement_pb2.py b/daemons/compute/client-py/datamovement_pb2.py index 5eb18941..4928b00e 100644 --- a/daemons/compute/client-py/datamovement_pb2.py +++ b/daemons/compute/client-py/datamovement_pb2.py @@ -12,12 +12,14 @@ _sym_db = _symbol_database.Default() +from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x12\x64\x61tamovement.proto\x12\x0c\x64\x61tamovement\"7\n\x14\x44\x61taMovementWorkflow\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\"\x81\x01\n\x19\x44\x61taMovementCommandStatus\x12\x0f\n\x07\x63ommand\x18\x01 \x01(\t\x12\x10\n\x08progress\x18\x02 \x01(\x05\x12\x13\n\x0b\x65lapsedTime\x18\x03 \x01(\t\x12\x13\n\x0blastMessage\x18\x04 \x01(\t\x12\x17\n\x0flastMessageTime\x18\x05 \x01(\t\"\x86\x01\n\x19\x44\x61taMovementCreateRequest\x12\x34\n\x08workflow\x18\x01 \x01(\x0b\x32\".datamovement.DataMovementWorkflow\x12\x0e\n\x06source\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65stination\x18\x03 \x01(\t\x12\x0e\n\x06\x64ryrun\x18\x04 \x01(\x08\"\xab\x01\n\x1a\x44\x61taMovementCreateResponse\x12\x0b\n\x03uid\x18\x01 \x01(\t\x12?\n\x06status\x18\x02 \x01(\x0e\x32/.datamovement.DataMovementCreateResponse.Status\x12\x0f\n\x07message\x18\x03 \x01(\t\".\n\x06Status\x12\x0b\n\x07SUCCESS\x10\x00\x12\n\n\x06\x46\x41ILED\x10\x01\x12\x0b\n\x07INVALID\x10\x02\"s\n\x19\x44\x61taMovementStatusRequest\x12\x34\n\x08workflow\x18\x01 \x01(\x0b\x32\".datamovement.DataMovementWorkflow\x12\x0b\n\x03uid\x18\x02 \x01(\t\x12\x13\n\x0bmaxWaitTime\x18\x03 \x01(\x03\"\xb2\x03\n\x1a\x44\x61taMovementStatusResponse\x12=\n\x05state\x18\x01 \x01(\x0e\x32..datamovement.DataMovementStatusResponse.State\x12?\n\x06status\x18\x02 \x01(\x0e\x32/.datamovement.DataMovementStatusResponse.Status\x12\x0f\n\x07message\x18\x03 \x01(\t\x12>\n\rcommandStatus\x18\x04 \x01(\x0b\x32\'.datamovement.DataMovementCommandStatus\"a\n\x05State\x12\x0b\n\x07PENDING\x10\x00\x12\x0c\n\x08STARTING\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\r\n\tCOMPLETED\x10\x03\x12\x0e\n\nCANCELLING\x10\x04\x12\x11\n\rUNKNOWN_STATE\x10\x05\"`\n\x06Status\x12\x0b\n\x07INVALID\x10\x00\x12\r\n\tNOT_FOUND\x10\x01\x12\x0b\n\x07SUCCESS\x10\x02\x12\n\n\x06\x46\x41ILED\x10\x03\x12\r\n\tCANCELLED\x10\x04\x12\x12\n\x0eUNKNOWN_STATUS\x10\x05\"^\n\x19\x44\x61taMovementDeleteRequest\x12\x34\n\x08workflow\x18\x01 \x01(\x0b\x32\".datamovement.DataMovementWorkflow\x12\x0b\n\x03uid\x18\x02 \x01(\t\"\xba\x01\n\x1a\x44\x61taMovementDeleteResponse\x12?\n\x06status\x18\x01 \x01(\x0e\x32/.datamovement.DataMovementDeleteResponse.Status\x12\x0f\n\x07message\x18\x02 \x01(\t\"J\n\x06Status\x12\x0b\n\x07INVALID\x10\x00\x12\r\n\tNOT_FOUND\x10\x01\x12\x0b\n\x07SUCCESS\x10\x02\x12\n\n\x06\x41\x43TIVE\x10\x03\x12\x0b\n\x07UNKNOWN\x10\x04\"O\n\x17\x44\x61taMovementListRequest\x12\x34\n\x08workflow\x18\x01 \x01(\x0b\x32\".datamovement.DataMovementWorkflow\"(\n\x18\x44\x61taMovementListResponse\x12\x0c\n\x04uids\x18\x01 \x03(\t\"^\n\x19\x44\x61taMovementCancelRequest\x12\x34\n\x08workflow\x18\x01 
\x01(\x0b\x32\".datamovement.DataMovementWorkflow\x12\x0b\n\x03uid\x18\x02 \x01(\t\"\xad\x01\n\x1a\x44\x61taMovementCancelResponse\x12?\n\x06status\x18\x01 \x01(\x0e\x32/.datamovement.DataMovementCancelResponse.Status\x12\x0f\n\x07message\x18\x02 \x01(\t\"=\n\x06Status\x12\x0b\n\x07INVALID\x10\x00\x12\r\n\tNOT_FOUND\x10\x01\x12\x0b\n\x07SUCCESS\x10\x02\x12\n\n\x06\x46\x41ILED\x10\x03\x32\xe0\x03\n\tDataMover\x12]\n\x06\x43reate\x12\'.datamovement.DataMovementCreateRequest\x1a(.datamovement.DataMovementCreateResponse\"\x00\x12]\n\x06Status\x12\'.datamovement.DataMovementStatusRequest\x1a(.datamovement.DataMovementStatusResponse\"\x00\x12]\n\x06\x44\x65lete\x12\'.datamovement.DataMovementDeleteRequest\x1a(.datamovement.DataMovementDeleteResponse\"\x00\x12W\n\x04List\x12%.datamovement.DataMovementListRequest\x1a&.datamovement.DataMovementListResponse\"\x00\x12]\n\x06\x43\x61ncel\x12\'.datamovement.DataMovementCancelRequest\x1a(.datamovement.DataMovementCancelResponse\"\x00\x42W\n\x1d\x63om.hpe.cray.nnf.datamovementB\x11\x44\x61taMovementProtoP\x01Z!nnf.cray.hpe.com/datamovement/apib\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x12\x64\x61tamovement.proto\x12\x0c\x64\x61tamovement\x1a\x1bgoogle/protobuf/empty.proto\"C\n\x1b\x44\x61taMovementVersionResponse\x12\x0f\n\x07version\x18\x01 \x01(\t\x12\x13\n\x0b\x61piVersions\x18\x02 \x03(\t\"7\n\x14\x44\x61taMovementWorkflow\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\"\x81\x01\n\x19\x44\x61taMovementCommandStatus\x12\x0f\n\x07\x63ommand\x18\x01 \x01(\t\x12\x10\n\x08progress\x18\x02 \x01(\x05\x12\x13\n\x0b\x65lapsedTime\x18\x03 \x01(\t\x12\x13\n\x0blastMessage\x18\x04 \x01(\t\x12\x17\n\x0flastMessageTime\x18\x05 \x01(\t\"\x86\x01\n\x19\x44\x61taMovementCreateRequest\x12\x34\n\x08workflow\x18\x01 \x01(\x0b\x32\".datamovement.DataMovementWorkflow\x12\x0e\n\x06source\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65stination\x18\x03 \x01(\t\x12\x0e\n\x06\x64ryrun\x18\x04 \x01(\x08\"\xab\x01\n\x1a\x44\x61taMovementCreateResponse\x12\x0b\n\x03uid\x18\x01 \x01(\t\x12?\n\x06status\x18\x02 \x01(\x0e\x32/.datamovement.DataMovementCreateResponse.Status\x12\x0f\n\x07message\x18\x03 \x01(\t\".\n\x06Status\x12\x0b\n\x07SUCCESS\x10\x00\x12\n\n\x06\x46\x41ILED\x10\x01\x12\x0b\n\x07INVALID\x10\x02\"s\n\x19\x44\x61taMovementStatusRequest\x12\x34\n\x08workflow\x18\x01 \x01(\x0b\x32\".datamovement.DataMovementWorkflow\x12\x0b\n\x03uid\x18\x02 \x01(\t\x12\x13\n\x0bmaxWaitTime\x18\x03 \x01(\x03\"\xb2\x03\n\x1a\x44\x61taMovementStatusResponse\x12=\n\x05state\x18\x01 \x01(\x0e\x32..datamovement.DataMovementStatusResponse.State\x12?\n\x06status\x18\x02 \x01(\x0e\x32/.datamovement.DataMovementStatusResponse.Status\x12\x0f\n\x07message\x18\x03 \x01(\t\x12>\n\rcommandStatus\x18\x04 \x01(\x0b\x32\'.datamovement.DataMovementCommandStatus\"a\n\x05State\x12\x0b\n\x07PENDING\x10\x00\x12\x0c\n\x08STARTING\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\r\n\tCOMPLETED\x10\x03\x12\x0e\n\nCANCELLING\x10\x04\x12\x11\n\rUNKNOWN_STATE\x10\x05\"`\n\x06Status\x12\x0b\n\x07INVALID\x10\x00\x12\r\n\tNOT_FOUND\x10\x01\x12\x0b\n\x07SUCCESS\x10\x02\x12\n\n\x06\x46\x41ILED\x10\x03\x12\r\n\tCANCELLED\x10\x04\x12\x12\n\x0eUNKNOWN_STATUS\x10\x05\"^\n\x19\x44\x61taMovementDeleteRequest\x12\x34\n\x08workflow\x18\x01 \x01(\x0b\x32\".datamovement.DataMovementWorkflow\x12\x0b\n\x03uid\x18\x02 \x01(\t\"\xba\x01\n\x1a\x44\x61taMovementDeleteResponse\x12?\n\x06status\x18\x01 \x01(\x0e\x32/.datamovement.DataMovementDeleteResponse.Status\x12\x0f\n\x07message\x18\x02 
\x01(\t\"J\n\x06Status\x12\x0b\n\x07INVALID\x10\x00\x12\r\n\tNOT_FOUND\x10\x01\x12\x0b\n\x07SUCCESS\x10\x02\x12\n\n\x06\x41\x43TIVE\x10\x03\x12\x0b\n\x07UNKNOWN\x10\x04\"O\n\x17\x44\x61taMovementListRequest\x12\x34\n\x08workflow\x18\x01 \x01(\x0b\x32\".datamovement.DataMovementWorkflow\"(\n\x18\x44\x61taMovementListResponse\x12\x0c\n\x04uids\x18\x01 \x03(\t\"^\n\x19\x44\x61taMovementCancelRequest\x12\x34\n\x08workflow\x18\x01 \x01(\x0b\x32\".datamovement.DataMovementWorkflow\x12\x0b\n\x03uid\x18\x02 \x01(\t\"\xad\x01\n\x1a\x44\x61taMovementCancelResponse\x12?\n\x06status\x18\x01 \x01(\x0e\x32/.datamovement.DataMovementCancelResponse.Status\x12\x0f\n\x07message\x18\x02 \x01(\t\"=\n\x06Status\x12\x0b\n\x07INVALID\x10\x00\x12\r\n\tNOT_FOUND\x10\x01\x12\x0b\n\x07SUCCESS\x10\x02\x12\n\n\x06\x46\x41ILED\x10\x03\x32\xb0\x04\n\tDataMover\x12N\n\x07Version\x12\x16.google.protobuf.Empty\x1a).datamovement.DataMovementVersionResponse\"\x00\x12]\n\x06\x43reate\x12\'.datamovement.DataMovementCreateRequest\x1a(.datamovement.DataMovementCreateResponse\"\x00\x12]\n\x06Status\x12\'.datamovement.DataMovementStatusRequest\x1a(.datamovement.DataMovementStatusResponse\"\x00\x12]\n\x06\x44\x65lete\x12\'.datamovement.DataMovementDeleteRequest\x1a(.datamovement.DataMovementDeleteResponse\"\x00\x12W\n\x04List\x12%.datamovement.DataMovementListRequest\x1a&.datamovement.DataMovementListResponse\"\x00\x12]\n\x06\x43\x61ncel\x12\'.datamovement.DataMovementCancelRequest\x1a(.datamovement.DataMovementCancelResponse\"\x00\x42W\n\x1d\x63om.hpe.cray.nnf.datamovementB\x11\x44\x61taMovementProtoP\x01Z!nnf.cray.hpe.com/datamovement/apib\x06proto3') +_DATAMOVEMENTVERSIONRESPONSE = DESCRIPTOR.message_types_by_name['DataMovementVersionResponse'] _DATAMOVEMENTWORKFLOW = DESCRIPTOR.message_types_by_name['DataMovementWorkflow'] _DATAMOVEMENTCOMMANDSTATUS = DESCRIPTOR.message_types_by_name['DataMovementCommandStatus'] _DATAMOVEMENTCREATEREQUEST = DESCRIPTOR.message_types_by_name['DataMovementCreateRequest'] @@ -35,6 +37,13 @@ _DATAMOVEMENTSTATUSRESPONSE_STATUS = _DATAMOVEMENTSTATUSRESPONSE.enum_types_by_name['Status'] _DATAMOVEMENTDELETERESPONSE_STATUS = _DATAMOVEMENTDELETERESPONSE.enum_types_by_name['Status'] _DATAMOVEMENTCANCELRESPONSE_STATUS = _DATAMOVEMENTCANCELRESPONSE.enum_types_by_name['Status'] +DataMovementVersionResponse = _reflection.GeneratedProtocolMessageType('DataMovementVersionResponse', (_message.Message,), { + 'DESCRIPTOR' : _DATAMOVEMENTVERSIONRESPONSE, + '__module__' : 'datamovement_pb2' + # @@protoc_insertion_point(class_scope:datamovement.DataMovementVersionResponse) + }) +_sym_db.RegisterMessage(DataMovementVersionResponse) + DataMovementWorkflow = _reflection.GeneratedProtocolMessageType('DataMovementWorkflow', (_message.Message,), { 'DESCRIPTOR' : _DATAMOVEMENTWORKFLOW, '__module__' : 'datamovement_pb2' @@ -124,40 +133,42 @@ DESCRIPTOR._options = None DESCRIPTOR._serialized_options = b'\n\035com.hpe.cray.nnf.datamovementB\021DataMovementProtoP\001Z!nnf.cray.hpe.com/datamovement/api' - _DATAMOVEMENTWORKFLOW._serialized_start=36 - _DATAMOVEMENTWORKFLOW._serialized_end=91 - _DATAMOVEMENTCOMMANDSTATUS._serialized_start=94 - _DATAMOVEMENTCOMMANDSTATUS._serialized_end=223 - _DATAMOVEMENTCREATEREQUEST._serialized_start=226 - _DATAMOVEMENTCREATEREQUEST._serialized_end=360 - _DATAMOVEMENTCREATERESPONSE._serialized_start=363 - _DATAMOVEMENTCREATERESPONSE._serialized_end=534 - _DATAMOVEMENTCREATERESPONSE_STATUS._serialized_start=488 - _DATAMOVEMENTCREATERESPONSE_STATUS._serialized_end=534 - 
_DATAMOVEMENTSTATUSREQUEST._serialized_start=536 - _DATAMOVEMENTSTATUSREQUEST._serialized_end=651 - _DATAMOVEMENTSTATUSRESPONSE._serialized_start=654 - _DATAMOVEMENTSTATUSRESPONSE._serialized_end=1088 - _DATAMOVEMENTSTATUSRESPONSE_STATE._serialized_start=893 - _DATAMOVEMENTSTATUSRESPONSE_STATE._serialized_end=990 - _DATAMOVEMENTSTATUSRESPONSE_STATUS._serialized_start=992 - _DATAMOVEMENTSTATUSRESPONSE_STATUS._serialized_end=1088 - _DATAMOVEMENTDELETEREQUEST._serialized_start=1090 - _DATAMOVEMENTDELETEREQUEST._serialized_end=1184 - _DATAMOVEMENTDELETERESPONSE._serialized_start=1187 - _DATAMOVEMENTDELETERESPONSE._serialized_end=1373 - _DATAMOVEMENTDELETERESPONSE_STATUS._serialized_start=1299 - _DATAMOVEMENTDELETERESPONSE_STATUS._serialized_end=1373 - _DATAMOVEMENTLISTREQUEST._serialized_start=1375 - _DATAMOVEMENTLISTREQUEST._serialized_end=1454 - _DATAMOVEMENTLISTRESPONSE._serialized_start=1456 - _DATAMOVEMENTLISTRESPONSE._serialized_end=1496 - _DATAMOVEMENTCANCELREQUEST._serialized_start=1498 - _DATAMOVEMENTCANCELREQUEST._serialized_end=1592 - _DATAMOVEMENTCANCELRESPONSE._serialized_start=1595 - _DATAMOVEMENTCANCELRESPONSE._serialized_end=1768 - _DATAMOVEMENTCANCELRESPONSE_STATUS._serialized_start=992 - _DATAMOVEMENTCANCELRESPONSE_STATUS._serialized_end=1053 - _DATAMOVER._serialized_start=1771 - _DATAMOVER._serialized_end=2251 + _DATAMOVEMENTVERSIONRESPONSE._serialized_start=65 + _DATAMOVEMENTVERSIONRESPONSE._serialized_end=132 + _DATAMOVEMENTWORKFLOW._serialized_start=134 + _DATAMOVEMENTWORKFLOW._serialized_end=189 + _DATAMOVEMENTCOMMANDSTATUS._serialized_start=192 + _DATAMOVEMENTCOMMANDSTATUS._serialized_end=321 + _DATAMOVEMENTCREATEREQUEST._serialized_start=324 + _DATAMOVEMENTCREATEREQUEST._serialized_end=458 + _DATAMOVEMENTCREATERESPONSE._serialized_start=461 + _DATAMOVEMENTCREATERESPONSE._serialized_end=632 + _DATAMOVEMENTCREATERESPONSE_STATUS._serialized_start=586 + _DATAMOVEMENTCREATERESPONSE_STATUS._serialized_end=632 + _DATAMOVEMENTSTATUSREQUEST._serialized_start=634 + _DATAMOVEMENTSTATUSREQUEST._serialized_end=749 + _DATAMOVEMENTSTATUSRESPONSE._serialized_start=752 + _DATAMOVEMENTSTATUSRESPONSE._serialized_end=1186 + _DATAMOVEMENTSTATUSRESPONSE_STATE._serialized_start=991 + _DATAMOVEMENTSTATUSRESPONSE_STATE._serialized_end=1088 + _DATAMOVEMENTSTATUSRESPONSE_STATUS._serialized_start=1090 + _DATAMOVEMENTSTATUSRESPONSE_STATUS._serialized_end=1186 + _DATAMOVEMENTDELETEREQUEST._serialized_start=1188 + _DATAMOVEMENTDELETEREQUEST._serialized_end=1282 + _DATAMOVEMENTDELETERESPONSE._serialized_start=1285 + _DATAMOVEMENTDELETERESPONSE._serialized_end=1471 + _DATAMOVEMENTDELETERESPONSE_STATUS._serialized_start=1397 + _DATAMOVEMENTDELETERESPONSE_STATUS._serialized_end=1471 + _DATAMOVEMENTLISTREQUEST._serialized_start=1473 + _DATAMOVEMENTLISTREQUEST._serialized_end=1552 + _DATAMOVEMENTLISTRESPONSE._serialized_start=1554 + _DATAMOVEMENTLISTRESPONSE._serialized_end=1594 + _DATAMOVEMENTCANCELREQUEST._serialized_start=1596 + _DATAMOVEMENTCANCELREQUEST._serialized_end=1690 + _DATAMOVEMENTCANCELRESPONSE._serialized_start=1693 + _DATAMOVEMENTCANCELRESPONSE._serialized_end=1866 + _DATAMOVEMENTCANCELRESPONSE_STATUS._serialized_start=1090 + _DATAMOVEMENTCANCELRESPONSE_STATUS._serialized_end=1151 + _DATAMOVER._serialized_start=1869 + _DATAMOVER._serialized_end=2429 # @@protoc_insertion_point(module_scope) diff --git a/daemons/compute/client-py/datamovement_pb2_grpc.py b/daemons/compute/client-py/datamovement_pb2_grpc.py index 6efa46a1..d4c29392 100644 --- 
a/daemons/compute/client-py/datamovement_pb2_grpc.py +++ b/daemons/compute/client-py/datamovement_pb2_grpc.py @@ -3,6 +3,7 @@ import grpc import datamovement_pb2 as datamovement__pb2 +from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 class DataMoverStub(object): @@ -16,6 +17,11 @@ def __init__(self, channel): Args: channel: A grpc.Channel. """ + self.Version = channel.unary_unary( + '/datamovement.DataMover/Version', + request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + response_deserializer=datamovement__pb2.DataMovementVersionResponse.FromString, + ) self.Create = channel.unary_unary( '/datamovement.DataMover/Create', request_serializer=datamovement__pb2.DataMovementCreateRequest.SerializeToString, @@ -48,6 +54,14 @@ class DataMoverServicer(object): for NNF storage. """ + def Version(self, request, context): + """Version sends a request to the data movement daemon and returns a response containing + details on the current build version and supported API versions. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def Create(self, request, context): """Create sends a new data movement request identified by source and destination fields. It returns a response containing a unique identifier which can be used to query the status of the request. @@ -90,6 +104,11 @@ def Cancel(self, request, context): def add_DataMoverServicer_to_server(servicer, server): rpc_method_handlers = { + 'Version': grpc.unary_unary_rpc_method_handler( + servicer.Version, + request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + response_serializer=datamovement__pb2.DataMovementVersionResponse.SerializeToString, + ), 'Create': grpc.unary_unary_rpc_method_handler( servicer.Create, request_deserializer=datamovement__pb2.DataMovementCreateRequest.FromString, @@ -127,6 +146,23 @@ class DataMover(object): for NNF storage. 
""" + @staticmethod + def Version(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/datamovement.DataMover/Version', + google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + datamovement__pb2.DataMovementVersionResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + @staticmethod def Create(request, target, diff --git a/daemons/compute/lib-cpp/client.cc b/daemons/compute/lib-cpp/client.cc index 8b106463..34ba4248 100644 --- a/daemons/compute/lib-cpp/client.cc +++ b/daemons/compute/lib-cpp/client.cc @@ -48,6 +48,20 @@ DataMoverClient::~DataMoverClient() { delete static_cast(data_); } +RPCStatus DataMoverClient::Version(VersionResponse *response) { + auto client = static_cast(data_); + + auto request_ = ::google::protobuf::Empty(); + + grpc::ClientContext context; + grpc::Status status = client->stub_->Version(&context, + request_, + static_cast(response->data_) + ); + + return RPCStatus(status.ok(), status.error_code(), status.error_message()); +} + RPCStatus DataMoverClient::Create(const Workflow &workflow, const CreateRequest &request, CreateResponse *response) { auto client = static_cast(data_); @@ -154,6 +168,32 @@ Workflow::Workflow(std::string name, std::string namespace_) : namespace_(namespace_) { } +VersionResponse::VersionResponse() { + auto response = new datamovement::DataMovementVersionResponse(); + data_ = static_cast(response); +} + +VersionResponse::~VersionResponse() { + delete static_cast(data_); +} + +std::string VersionResponse::version() { + return static_cast(data_)->version(); +} + +std::vector VersionResponse::apiversions() { + auto response = static_cast(data_); + + auto apiVersions = std::vector(); + apiVersions.reserve(response->apiversions_size()); + + for (auto& v : response->apiversions()) { + apiVersions.push_back(v); + } + + return apiVersions; +} + CreateRequest::CreateRequest(std::string source, std::string destination) { auto request = new datamovement::DataMovementCreateRequest(); diff --git a/daemons/compute/lib-cpp/client.h b/daemons/compute/lib-cpp/client.h index c1188a53..e46bbc9e 100644 --- a/daemons/compute/lib-cpp/client.h +++ b/daemons/compute/lib-cpp/client.h @@ -29,6 +29,7 @@ namespace data_movement { class RPCStatus; class Workflow; +class VersionResponse; class CreateRequest; class CreateResponse; class StatusRequest; @@ -46,6 +47,7 @@ class DataMoverClient { DataMoverClient(const std::string &target); ~DataMoverClient(); + RPCStatus Version(VersionResponse *response); RPCStatus Create(const Workflow &workflow, const CreateRequest &request, CreateResponse *response); RPCStatus Status(const Workflow &workflow, const StatusRequest &request, StatusResponse *response); RPCStatus Cancel(const Workflow &workflow, const CancelRequest &request, CancelResponse *response); @@ -81,6 +83,19 @@ class Workflow { std::string namespace_; }; +class VersionResponse { + public: + VersionResponse(); + ~VersionResponse(); + + std::string version(); + std::vector apiversions(); + + private: + friend DataMoverClient; + + void *data_; +}; class CreateRequest { public: diff --git a/daemons/compute/lib-cpp/example/CMakeLists.txt b/daemons/compute/lib-cpp/example/CMakeLists.txt index 00fb0550..c2677bf5 100644 --- a/daemons/compute/lib-cpp/example/CMakeLists.txt +++ 
b/daemons/compute/lib-cpp/example/CMakeLists.txt @@ -9,5 +9,5 @@ add_executable(client "client.cc") target_link_directories(client BEFORE PRIVATE ../cmake/build/ ) -target_link_libraries(client libmylib.dylib) +target_link_libraries(client libnnfdm.dylib) target_include_directories(client PRIVATE ../) diff --git a/daemons/compute/lib-cpp/example/client.cc b/daemons/compute/lib-cpp/example/client.cc index 02f7f71e..21050216 100644 --- a/daemons/compute/lib-cpp/example/client.cc +++ b/daemons/compute/lib-cpp/example/client.cc @@ -30,6 +30,25 @@ int main(int argc, char** argv) { DataMoverClient client("unix:///tmp/nnf.sock"); + { + // Retrieve the version information + + VersionResponse versionResponse; + + RPCStatus status = client.Version(&versionResponse); + if (!status.ok()) { + std::cout << "Version RPC FAILED (" << status.error_code() << "): " << status.error_message() << std::endl; + return 1; + } + + std::cout << "Data Movement Version: " << versionResponse.version() << std::endl; + std::cout << "Supported API Versions:" << std::endl; + for (auto version : versionResponse.apiversions()) { + std::cout << "\t" << version << std::endl; + } + + } + // Allocate a workflow that will be used by all requests Workflow workflow("YOUR-WORKFLOW_NAME", "YOUR-WORKFLOW-NAMESPACE"); @@ -41,7 +60,7 @@ int main(int argc, char** argv) { RPCStatus status = client.Create(workflow, createRequest, &createResponse); if (!status.ok()) { - std::cout << "Create RPC FAILED" << status.error_code() << ": " << status.error_message() << std::endl; + std::cout << "Create RPC FAILED (" << status.error_code() << "): " << status.error_message() << std::endl; return 1; } @@ -62,7 +81,7 @@ int main(int argc, char** argv) { RPCStatus status = client.List(workflow, listRequest, &listResponse); if (!status.ok()) { - std::cout << "List RPC FAILED" << status.error_code() << ": " << status.error_message() << std::endl; + std::cout << "List RPC FAILED (" << status.error_code() << "): " << status.error_message() << std::endl; return 1; } @@ -79,7 +98,7 @@ int main(int argc, char** argv) { RequestStatus: RPCStatus status = client.Status(workflow, statusRequest, &statusResponse); if (!status.ok()) { - std::cout << "Status RPC FAILED" << status.error_code() << ": " << status.error_message() << std::endl; + std::cout << "Status RPC FAILED (" << status.error_code() << "): " << status.error_message() << std::endl; return 1; } @@ -113,7 +132,7 @@ int main(int argc, char** argv) { RPCStatus status = client.Cancel(workflow, cancelRequest, &cancelResponse); if (!status.ok()) { - std::cout << "Cancle RPC FAILED" << status.error_code() << ": " << status.error_message() << std::endl; + std::cout << "Cancel RPC FAILED (" << status.error_code() << "): " << status.error_message() << std::endl; return 1; } @@ -133,7 +152,7 @@ int main(int argc, char** argv) { RPCStatus status = client.Delete(workflow, deleteRequest, &deleteResponse); if (!status.ok()) { - std::cout << "Delete RPC FAILED" << status.error_code() << ": " << status.error_message() << std::endl; + std::cout << "Delete RPC FAILED (" << status.error_code() << "): " << status.error_message() << std::endl; return 1; } diff --git a/daemons/compute/server/main.go b/daemons/compute/server/main.go index 5aa845ca..ea2eb750 100644 --- a/daemons/compute/server/main.go +++ b/daemons/compute/server/main.go @@ -36,6 +36,7 @@ import ( "github.com/NearNodeFlash/nnf-dm/daemons/compute/server/auth" server "github.com/NearNodeFlash/nnf-dm/daemons/compute/server/servers" + 
"github.com/NearNodeFlash/nnf-dm/daemons/compute/server/version" ) const ( @@ -112,6 +113,8 @@ func (service *Service) Manage() (string, error) { func (service *Service) Run(srv server.Server, listener net.Listener) error { + stdlog.Println("Version:", version.BuildVersion()) + go func() { if err := srv.StartManager(); err != nil { errlog.Println("Manager Error: ", err) diff --git a/daemons/compute/server/servers/server_default.go b/daemons/compute/server/servers/server_default.go index 615b7071..d74492de 100644 --- a/daemons/compute/server/servers/server_default.go +++ b/daemons/compute/server/servers/server_default.go @@ -32,6 +32,7 @@ import ( "sync" "time" + "google.golang.org/protobuf/types/known/emptypb" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -58,6 +59,7 @@ import ( pb "github.com/NearNodeFlash/nnf-dm/daemons/compute/client-go/api" "github.com/NearNodeFlash/nnf-dm/daemons/compute/server/auth" + "github.com/NearNodeFlash/nnf-dm/daemons/compute/server/version" ) var ( @@ -270,6 +272,13 @@ func (r *dataMovementReconciler) Reconcile(ctx context.Context, req ctrl.Request return ctrl.Result{}, nil } +func (*defaultServer) Version(context.Context, *emptypb.Empty) (*pb.DataMovementVersionResponse, error) { + return &pb.DataMovementVersionResponse{ + Version: version.BuildVersion(), + ApiVersions: version.ApiVersions(), + }, nil +} + func (s *defaultServer) Create(ctx context.Context, req *pb.DataMovementCreateRequest) (*pb.DataMovementCreateResponse, error) { computeClientMount, computeMountInfo, err := s.findComputeMountInfo(ctx, req) diff --git a/daemons/compute/server/servers/server_sim.go b/daemons/compute/server/servers/server_sim.go index 614f0894..215ace36 100644 --- a/daemons/compute/server/servers/server_sim.go +++ b/daemons/compute/server/servers/server_sim.go @@ -25,7 +25,9 @@ import ( "time" pb "github.com/NearNodeFlash/nnf-dm/daemons/compute/client-go/api" + "github.com/NearNodeFlash/nnf-dm/daemons/compute/server/version" "github.com/google/uuid" + "google.golang.org/protobuf/types/known/emptypb" ) const ( @@ -57,6 +59,13 @@ func (s *simulatedServer) StartManager() error { return nil } +func (*simulatedServer) Version(context.Context, *emptypb.Empty) (*pb.DataMovementVersionResponse, error) { + return &pb.DataMovementVersionResponse{ + Version: version.BuildVersion(), + ApiVersions: version.ApiVersions(), + }, nil +} + func (s *simulatedServer) Create(ctx context.Context, req *pb.DataMovementCreateRequest) (*pb.DataMovementCreateResponse, error) { uid := uuid.New() diff --git a/daemons/compute/server/version/version.go b/daemons/compute/server/version/version.go new file mode 100644 index 00000000..378bef00 --- /dev/null +++ b/daemons/compute/server/version/version.go @@ -0,0 +1,46 @@ +/* + * Copyright 2023 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package version
+
+// The current data-mover version string. Clients can use this to identify particular
+// information about the installed data-mover daemon.
+var (
+	// Version contains the current version of the data mover daemon.
+	version = "v0.0.1"
+
+	// Commit Hash is the shortened git commit of the data mover daemon. This value is patched
+	// in by the "build-daemon" make target, and is "unknown" otherwise.
+	commitHash = "unknown"
+
+	// API Versions defines the list of API endpoints supported by the data-mover daemon.
+	// If the protobuf API changes in a way that is not backwards compatible, a new version
+	// should be introduced.
+	//
+	// All API endpoints are part of the same version, so a new CreateV2 endpoint would cause all endpoints
+	// to transition to "V2" (StatusV2, DeleteV2, etc) even if no modifications are made.
+	//
+	// This value is an array in case a particular API version has a defect. For example, if a new API
+	// version "v3-beta" is introduced but found to be defective, that version can be removed and "v3"
+	// added once the defect is resolved and the v3 API is finalized.
+	apiVersions = []string{"1"}
+)
+
+func BuildVersion() string { return version + "-" + commitHash }
+
+func ApiVersions() []string { return apiVersions }
diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
new file mode 100644
index 00000000..e7fcea31
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
@@ -0,0 +1,168 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/empty.proto + +package emptypb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +type Empty struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Empty) Reset() { + *x = Empty{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_empty_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Empty) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Empty) ProtoMessage() {} + +func (x *Empty) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_empty_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Empty.ProtoReflect.Descriptor instead. +func (*Empty) Descriptor() ([]byte, []int) { + return file_google_protobuf_empty_proto_rawDescGZIP(), []int{0} +} + +var File_google_protobuf_empty_proto protoreflect.FileDescriptor + +var file_google_protobuf_empty_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x07, + 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x7d, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0a, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, + 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, + 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, + 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_empty_proto_rawDescOnce sync.Once + file_google_protobuf_empty_proto_rawDescData = file_google_protobuf_empty_proto_rawDesc +) + +func file_google_protobuf_empty_proto_rawDescGZIP() []byte { + file_google_protobuf_empty_proto_rawDescOnce.Do(func() { + file_google_protobuf_empty_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_empty_proto_rawDescData) + }) + return file_google_protobuf_empty_proto_rawDescData +} + +var file_google_protobuf_empty_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_empty_proto_goTypes = []interface{}{ + (*Empty)(nil), // 0: google.protobuf.Empty +} +var 
file_google_protobuf_empty_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_empty_proto_init() } +func file_google_protobuf_empty_proto_init() { + if File_google_protobuf_empty_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Empty); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_empty_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_empty_proto_goTypes, + DependencyIndexes: file_google_protobuf_empty_proto_depIdxs, + MessageInfos: file_google_protobuf_empty_proto_msgTypes, + }.Build() + File_google_protobuf_empty_proto = out.File + file_google_protobuf_empty_proto_rawDesc = nil + file_google_protobuf_empty_proto_goTypes = nil + file_google_protobuf_empty_proto_depIdxs = nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 840b4b9b..ff2bfa1d 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -25,8 +25,6 @@ github.com/Azure/go-autorest/tracing github.com/HewlettPackard/dws/api/v1alpha1 github.com/HewlettPackard/dws/utils/dwdparse github.com/HewlettPackard/dws/utils/updater -# github.com/HewlettPackard/lustre-csi-driver v0.0.0-20220623192103-4ce53adacc95 -## explicit; go 1.17 # github.com/NearNodeFlash/lustre-fs-operator v0.0.0-20221101145649-697b21833988 ## explicit; go 1.19 github.com/NearNodeFlash/lustre-fs-operator/api/v1alpha1 @@ -44,8 +42,6 @@ github.com/beorn7/perks/quantile # github.com/cespare/xxhash/v2 v2.1.2 ## explicit; go 1.11 github.com/cespare/xxhash/v2 -# github.com/container-storage-interface/spec v1.5.0 -## explicit; go 1.16 # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew @@ -387,6 +383,7 @@ google.golang.org/protobuf/runtime/protoimpl google.golang.org/protobuf/types/descriptorpb google.golang.org/protobuf/types/known/anypb google.golang.org/protobuf/types/known/durationpb +google.golang.org/protobuf/types/known/emptypb google.golang.org/protobuf/types/known/timestamppb # gopkg.in/inf.v0 v0.9.1 ## explicit From 2ffaee47d2a665c114b7d381c4f5b41b7ae7a375 Mon Sep 17 00:00:00 2001 From: matthew-richerson <82597529+matthew-richerson@users.noreply.github.com> Date: Tue, 21 Feb 2023 08:52:16 -0600 Subject: [PATCH 04/24] Github-84: Add commit hash to RPM builds (#87) * Github-84: Add commit hash to RPM builds Use the GITHUB_SHA environment variable provided in the github action and save it in the file .commit. The RPM build will use the value in this file instead of the output provided by the git command in the Makefile. This allows the git hash to be set when building from the source RPM. 
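In short, the flow stitches together three pieces (a sketch assembled from
the exact lines in the diffs below):

```
# rpm_build workflow: record the shortened commit before creating the source tarball
echo $GITHUB_SHA | cut -c1-8 > .commit

# nnf-dm.spec %build section: feed the saved hash to the Makefile
COMMIT_HASH=$(cat .commit) make build-daemon

# Makefile: fall back to git only when COMMIT_HASH is not already set
build-daemon: COMMIT_HASH?=$(shell git rev-parse --short HEAD)
```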
Signed-off-by: Matt Richerson

* Truncate GITHUB_SHA to 8 characters

Signed-off-by: Matt Richerson

---------

Signed-off-by: Matt Richerson
---
 .github/workflows/rpm_build.yml    | 1 +
 Makefile                           | 2 +-
 daemons/compute/server/nnf-dm.spec | 2 +-
 3 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/rpm_build.yml b/.github/workflows/rpm_build.yml
index c99a7c41..3207dc24 100644
--- a/.github/workflows/rpm_build.yml
+++ b/.github/workflows/rpm_build.yml
@@ -22,6 +22,7 @@ jobs:
         dnf install -y rpm-build rpmdevtools git make
         dnf module -y install go-toolset
         rpmdev-setuptree
+        echo $GITHUB_SHA | cut -c1-8 > .commit
         tar -czf /github/home/rpmbuild/SOURCES/nnf-datamovement-1.0.tar.gz --transform 's,^,nnf-datamovement-1.0/,' .
     - name: build rpms
       run: rpmbuild -ba daemons/compute/server/nnf-dm.spec
diff --git a/Makefile b/Makefile
index 134b5e88..a5462eaa 100644
--- a/Makefile
+++ b/Makefile
@@ -120,7 +120,7 @@ container-unit-test: .version ## Run tests inside a container image

 ##@ Build

-build-daemon: COMMIT_HASH = $(shell git rev-parse --short HEAD)
+build-daemon: COMMIT_HASH?=$(shell git rev-parse --short HEAD)
 build-daemon: PACKAGE = github.com/NearNodeFlash/nnf-dm/daemons/compute/server/version
 build-daemon: manifests generate fmt vet ## Build standalone nnf-datamovement daemon
 	GOOS=linux GOARCH=amd64 go build -ldflags="-X '$(PACKAGE).commitHash=$(COMMIT_HASH)'" -o bin/nnf-dm daemons/compute/server/main.go
diff --git a/daemons/compute/server/nnf-dm.spec b/daemons/compute/server/nnf-dm.spec
index dd37a543..dee1ccb7 100644
--- a/daemons/compute/server/nnf-dm.spec
+++ b/daemons/compute/server/nnf-dm.spec
@@ -21,7 +21,7 @@ Near Node Flash data movement through the data movement API.
 %setup -q

 %build
-make build-daemon
+COMMIT_HASH=$(cat .commit) make build-daemon

 %install
 mkdir -p %{buildroot}/usr/bin/

From 1f8e8b0ba4dfa6f75b66dbea802a6fca73be1875 Mon Sep 17 00:00:00 2001
From: Blake Devcich <89158881+bdevcich-hpe@users.noreply.github.com>
Date: Mon, 6 Mar 2023 13:57:21 -0600
Subject: [PATCH 05/24] Data Movement: Allow use of hostfiles + revamped
 Configuration (#89)

This allows for the configuration of slots and max_slots in the mpirun
hostfile. Coupled with this is the slimmed-down configuration, which
allows for more flexibility. Users now specify the full data movement
command in the ConfigMap and use $VAR substitution.

Profiles have also been introduced in the ConfigMap. However, only the
`default` profile can be used at this time.

Changelog:

* Removed the use of mpirun --host. Instead, create hostfiles and use
  mpirun --hostfile
* Simplified the DM command to be more flexible and use $VAR
  substitution for built-ins (i.e. hostfile name, gid/uid, src/dest).
* Changed DM ConfigMap to allow for nested values for profiles.
* ConfigMap values are now typed.
* DM ConfigMap is now part of the config/default deployment.
* Change the nnf-dm-config ConfigMap namespace from default to nnf-dm-system Signed-off-by: Blake Devcich --- config/default/kustomization.yaml | 1 + config/dm_config/kustomization.yaml | 7 +- config/dm_config/nnf-dm-config.env | 12 - config/dm_config/nnf-dm-config.yaml | 30 + controllers/datamovement_controller.go | 196 +- controllers/datamovement_controller_test.go | 1066 +-- .../compute/server/servers/server_default.go | 16 +- deploy.sh | 2 - go.mod | 27 +- go.sum | 36 + main.go | 3 +- .../dws/api/v1alpha1/clientmount_types.go | 9 + .../api/v1alpha1/directivebreakdown_types.go | 12 +- .../persistentstorageinstance_types.go | 10 +- .../dws/api/v1alpha1/resource.go | 71 + .../dws/api/v1alpha1/storage_types.go | 73 +- .../dws/api/v1alpha1/storagepool_types.go | 63 - .../api/v1alpha1/systemconfiguration_types.go | 1 - .../dws/api/v1alpha1/workflow_types.go | 116 +- .../dws/api/v1alpha1/workflow_webhook.go | 126 +- .../dws/api/v1alpha1/zz_generated.deepcopy.go | 103 +- .../dws/utils/dwdparse/dwdparse.go | 281 +- .../utils/dwdparse/zz_generated.deepcopy.go | 2 +- .../dws/utils/updater/status_updater.go | 2 +- .../models/model_storage_v1_9_0_storage.go | 2 + .../nnf-sos/api/v1alpha1/nnf_access_types.go | 11 +- .../api/v1alpha1/nnf_datamovement_types.go | 5 +- .../api/v1alpha1/nnf_node_storage_types.go | 15 + .../nnf-sos/api/v1alpha1/nnf_node_types.go | 4 + .../api/v1alpha1/nnf_resource_status_type.go | 21 + .../nnf-sos/api/v1alpha1/nnf_storage_types.go | 13 +- .../api/v1alpha1/nnfcontainerprofile_types.go | 86 + .../api/v1alpha1/nnfstorageprofile_types.go | 117 +- .../api/v1alpha1/nnfstorageprofile_webhook.go | 6 +- .../nnf-sos/api/v1alpha1/workflow_helpers.go | 16 + .../api/v1alpha1/zz_generated.deepcopy.go | 208 +- .../bases/nnf.cray.hpe.com_nnfaccesses.yaml | 28 +- ...nnf.cray.hpe.com_nnfcontainerprofiles.yaml | 7264 +++++++++++++++++ .../crd/bases/nnf.cray.hpe.com_nnfnodes.yaml | 9 + .../nnf.cray.hpe.com_nnfnodestorages.yaml | 21 + .../nnf.cray.hpe.com_nnfstorageprofiles.yaml | 320 +- .../bases/nnf.cray.hpe.com_nnfstorages.yaml | 17 +- vendor/modules.txt | 48 +- 43 files changed, 9354 insertions(+), 1122 deletions(-) delete mode 100644 config/dm_config/nnf-dm-config.env create mode 100644 config/dm_config/nnf-dm-config.yaml create mode 100644 vendor/github.com/HewlettPackard/dws/api/v1alpha1/resource.go delete mode 100644 vendor/github.com/HewlettPackard/dws/api/v1alpha1/storagepool_types.go create mode 100644 vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnfcontainerprofile_types.go create mode 100644 vendor/github.com/NearNodeFlash/nnf-sos/config/crd/bases/nnf.cray.hpe.com_nnfcontainerprofiles.yaml diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index e154e555..6db36997 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -16,6 +16,7 @@ bases: - ../crd - ../rbac - ../manager +- ../dm_config # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in # crd/kustomization.yaml # - ../webhook diff --git a/config/dm_config/kustomization.yaml b/config/dm_config/kustomization.yaml index e71869ae..ef7be497 100644 --- a/config/dm_config/kustomization.yaml +++ b/config/dm_config/kustomization.yaml @@ -5,7 +5,6 @@ generatorOptions: disableNameSuffixHash: true configMapGenerator: - - name: nnf-dm-config - namespace: default - envs: - - nnf-dm-config.env +- files: + - nnf-dm-config.yaml + name: config \ No newline at end of file diff --git a/config/dm_config/nnf-dm-config.env 
b/config/dm_config/nnf-dm-config.env
deleted file mode 100644
index 8b1ea845..00000000
--- a/config/dm_config/nnf-dm-config.env
+++ /dev/null
@@ -1,12 +0,0 @@
-# The command to run to perform data movement. If empty or absent, use the default command.
-dmCommand=
-
-# The interval at which to collect progress from the command performing data movement.
-dmProgressInterval=5s
-
-# The interval (in seconds) to supply to the dcp --progress command to dump progress output.
-dcpProgressInterval=1s
-
-# If dmCommand is empty or absent, then use this to override the number of processes
-# used for data movement. If empty or absent, then use the default for this value.
-dmNumProcesses=
diff --git a/config/dm_config/nnf-dm-config.yaml b/config/dm_config/nnf-dm-config.yaml
new file mode 100644
index 00000000..410193a4
--- /dev/null
+++ b/config/dm_config/nnf-dm-config.yaml
@@ -0,0 +1,30 @@
+# Each profile is capable of providing different configurations for data movement.
+# Note: For now, only the default profile is supported.
+profiles:
+
+  # Default profile that is used for all data movement activity.
+  default:
+    # The number of slots specified in the MPI hostfile. A value less than 1 disables the use
+    # of slots in the hostfile.
+    slots: 8
+
+    # The number of max_slots specified in the MPI hostfile. A value less than 1 disables the use
+    # of max_slots in the hostfile.
+    maxSlots: 0
+
+    # The full command to execute data movement. $VARS are replaced by the nnf software. Available
+    # $VARS:
+    #   HOSTFILE: hostfile that is created and used for mpirun. Contains a list of hosts and the
+    #             slots/max_slots for each host. This hostfile is created at `/tmp/<dm name>/hostfile`
+    #   UID: User ID that is inherited from the Workflow
+    #   GID: Group ID that is inherited from the Workflow
+    #   SRC: source for the data movement
+    #   DEST: destination for the data movement
+    #   default: command: mpirun --allow-run-as-root --hostfile $HOSTFILE dcp --progress 1 --uid $UID --gid $GID $SRC $DEST
+    command: mpirun --allow-run-as-root --hostfile $HOSTFILE dcp --progress 1 --uid $UID --gid $GID $SRC $DEST
+
+# NnfDataMovement resources have the ability to collect and store the progress percentage and the
+# last few lines of output in the CommandStatus field. This number is used for the interval to collect
+# the progress data. `dcp --progress N` must be included in the data movement command in order for
+# progress to be collected. A value less than 1 disables this functionality.
+progressIntervalSeconds: 5 \ No newline at end of file diff --git a/controllers/datamovement_controller.go b/controllers/datamovement_controller.go index d3ade244..e36f8223 100644 --- a/controllers/datamovement_controller.go +++ b/controllers/datamovement_controller.go @@ -20,6 +20,7 @@ package controllers import ( + "bufio" "bytes" "context" "errors" @@ -27,6 +28,7 @@ import ( "io" "os" "os/exec" + "path/filepath" "reflect" "regexp" "strconv" @@ -46,6 +48,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/yaml" dmv1alpha1 "github.com/NearNodeFlash/nnf-dm/api/v1alpha1" "github.com/NearNodeFlash/nnf-dm/controllers/metrics" @@ -59,19 +62,11 @@ const ( // DM ConfigMap Info configMapName = "nnf-dm-config" - configMapNamespace = corev1.NamespaceDefault + configMapNamespace = nnfv1alpha1.DataMovementNamespace // DM ConfigMap Data Keys - configMapKeyCmd = "dmCommand" - configMapKeyProgInterval = "dmProgressInterval" - configMapKeyDcpProgInterval = "dcpProgressInterval" - configMapKeyNumProcesses = "dmNumProcesses" - - // DM ConfigMap Default Values - configMapDefaultCmd = "" - configMapDefaultProgInterval = 5 * time.Second - configMapDefaultDcpProgInterval = 1 * time.Second - configMapDefaultNumProcess = 1 + configMapKeyData = "nnf-dm-config.yaml" + configMapKeyProfileDefault = "default" ) // Regex to scrape the progress output of the `dcp` command. Example output: @@ -95,6 +90,19 @@ type dataMovementCancelContext struct { cancel context.CancelFunc } +// Configuration that matches the nnf-dm-config ConfigMap +type dmConfig struct { + Profiles map[string]dmConfigProfile `yaml:"profiles"` + ProgressIntervalSeconds int `yaml:"progressIntervalSeconds,omitempty"` +} + +// Each profile can have different settings +type dmConfigProfile struct { + Slots int `yaml:"slots,omitempty"` + MaxSlots int `yaml:"maxSlots,omitempty"` + Command string `yaml:"command"` +} + // Invalid error is a non-recoverable error type that implies the Data Movement resource is invalid type invalidError struct { err error @@ -226,22 +234,40 @@ func (r *DataMovementReconciler) Reconcile(ctx context.Context, req ctrl.Request cancel: cancel, }) - // Pull configurable values from the ConfigMap + // Get DM Config map configMap := &corev1.ConfigMap{} if err := r.Get(ctx, types.NamespacedName{Name: configMapName, Namespace: configMapNamespace}, configMap); err != nil { - log.Info("Config map not found - requeueing") - return ctrl.Result{}, err + log.Info("Config map not found - requeueing", "name", configMapName, "namespace", configMapNamespace) + return ctrl.Result{}, handleInvalidError(err) } log.Info("Config map found", "data", configMap.Data) - dmCmd := getCommand(configMap) - progressCollectInterval := getCollectionInterval(configMap) - dcpProgressInterval := getDcpProgressInterval(configMap) - numProcesses := getNumProcesses(configMap) + cfg := dmConfig{} + if err := yaml.Unmarshal([]byte(configMap.Data[configMapKeyData]), &cfg); err != nil { + log.Error(err, "error reading config map data") + return ctrl.Result{}, handleInvalidError(err) + } + log.Info("Config map unmarshalled", "config", cfg) + + // TODO: Allow use of non-default dm config profiles - for now only use the default. For copy + // offload API, we could create "fake" profiles and store those in the DM object based on the + // parameters supplied to the CreateRequest(). 
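+	// For illustration only (a sketch, not produced by this code): with the default
+	// nnf-dm-config.yaml shown above, cfg now holds roughly
+	//
+	//	dmConfig{
+	//		Profiles: map[string]dmConfigProfile{
+	//			"default": {Slots: 8, MaxSlots: 0,
+	//				Command: "mpirun --allow-run-as-root --hostfile $HOSTFILE dcp --progress 1 --uid $UID --gid $GID $SRC $DEST"},
+	//		},
+	//		ProgressIntervalSeconds: 5,
+	//	}
+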
+ // Ensure profile exists + profile, found := cfg.Profiles[configMapKeyProfileDefault] + if !found { + return ctrl.Result{}, handleInvalidError(fmt.Errorf( + "'%s' profile not found in config map", configMapKeyProfileDefault)) + } + log.Info("Using profile", "name", configMapKeyProfileDefault, "profile", profile) - // Set the command and arguments according to the values specified by the config map - cmdStr, cmdArgs := getCmdAndArgs(dmCmd, numProcesses, dcpProgressInterval, hosts, dm) - cmd := exec.CommandContext(ctxCancel, cmdStr, cmdArgs...) + cmdArgs, mpiHostfile, err := buildDMCommand(profile, hosts, dm) + if err != nil { + log.Error(err, "error building DM command") + return ctrl.Result{}, handleInvalidError(err) + } + + log.Info("MPI Hostfile preview", "first line", peekMpiHostfile(mpiHostfile)) + cmd := exec.CommandContext(ctxCancel, cmdArgs[0], cmdArgs[1:]...) // Record the start of the data movement operation now := metav1.NowMicro() @@ -273,6 +299,7 @@ func (r *DataMovementReconciler) Reconcile(ctx context.Context, req ctrl.Request // While the command is running, capture and process the output. Read lines until EOF to // ensure we have the latest output. Then use the last regex match to obtain the most recent // progress. + progressCollectInterval := time.Duration(cfg.ProgressIntervalSeconds) * time.Second if progressCollectionEnabled(progressCollectInterval) { go func() { var elapsed metav1.Duration @@ -377,6 +404,8 @@ func (r *DataMovementReconciler) Reconcile(ctx context.Context, req ctrl.Request log.Info("Completed Command", "cmdStatus", cmdStatus) } + os.RemoveAll(filepath.Dir(mpiHostfile)) + status := dm.Status.DeepCopy() err = retry.RetryOnConflict(retry.DefaultRetry, func() error { @@ -403,87 +432,96 @@ func (r *DataMovementReconciler) Reconcile(ctx context.Context, req ctrl.Request return ctrl.Result{}, nil } -// Build the command and arguments based on the values we get from the config map -func getCmdAndArgs(cmCommand string, cmNumProcesses int, progressInterval int, hosts []string, dm *nnfv1alpha1.NnfDataMovement) (string, []string) { - var cmd string - var args []string +func buildDMCommand(profile dmConfigProfile, hosts []string, dm *nnfv1alpha1.NnfDataMovement) ([]string, string, error) { + var hostfile string + var err error - // Allow the data movement command to be overridden via the config map for testing purposes - if len(cmCommand) > 0 { - cmdList := strings.Split(cmCommand, " ") - cmd = cmdList[0] - args = cmdList[1:] - } else { - numProcesses := len(hosts) + // Command provided via the configmap + providedCmd := profile.Command - // If data movement is on the rabbit namespace, use the config map value - if dm.Namespace == os.Getenv("NNF_NODE_NAME") { - numProcesses = cmNumProcesses - } + // Create MPI hostfile only if included in the provided command + if strings.Contains(providedCmd, "$HOSTFILE") { + slots := profile.Slots + maxSlots := profile.MaxSlots - cmd = "mpirun" - args = []string{ - "--allow-run-as-root", // required to be able to set dcp uid/gid - "-np", fmt.Sprintf("%d", numProcesses), - "--host", strings.Join(hosts, ","), - "dcp", - "--progress", fmt.Sprintf("%d", progressInterval), - "--uid", fmt.Sprintf("%d", dm.Spec.UserId), - "--gid", fmt.Sprintf("%d", dm.Spec.GroupId), - dm.Spec.Source.Path, dm.Spec.Destination.Path, + hostfile, err = createMpiHostfile(dm.Name, hosts, slots, maxSlots) + if err != nil { + return nil, "", fmt.Errorf("error creating MPI hostfile: %v", err) } } - return cmd, args + cmd := profile.Command + cmd = 
strings.ReplaceAll(cmd, "$HOSTFILE", hostfile) + cmd = strings.ReplaceAll(cmd, "$UID", fmt.Sprintf("%d", dm.Spec.UserId)) + cmd = strings.ReplaceAll(cmd, "$GID", fmt.Sprintf("%d", dm.Spec.GroupId)) + cmd = strings.ReplaceAll(cmd, "$SRC", dm.Spec.Source.Path) + cmd = strings.ReplaceAll(cmd, "$DEST", dm.Spec.Destination.Path) + + return strings.Split(cmd, " "), hostfile, nil } -func getCommand(config *corev1.ConfigMap) string { - cmd := configMapDefaultCmd +// Create an MPI Hostfile given a list of hosts, slots, and maxSlots. A temporary directory is +// created based on the DM Name. The hostfile is created inside of this directory. +func createMpiHostfile(dmName string, hosts []string, slots, maxSlots int) (string, error) { - if len(config.Data[configMapKeyCmd]) > 0 { - cmd = config.Data[configMapKeyCmd] + tmpdir := filepath.Join("/tmp", dmName) + if err := os.MkdirAll(tmpdir, 0755); err != nil { + return "", err } + hostfilePath := filepath.Join(tmpdir, "hostfile") - return cmd -} - -func getCollectionInterval(config *corev1.ConfigMap) time.Duration { - collectionInterval := configMapDefaultProgInterval - - if len(config.Data[configMapKeyProgInterval]) > 0 { - if parsedInterval, err := time.ParseDuration(config.Data[configMapKeyProgInterval]); err == nil { - collectionInterval = parsedInterval - } + f, err := os.Create(hostfilePath) + if err != nil { + return "", err } + defer f.Close() + + // $ cat my-hosts + // node0 slots=2 max_slots=20 + // node1 slots=2 max_slots=20 + // https://www.open-mpi.org/faq/?category=running#mpirun-hostfile + for _, host := range hosts { + if _, err := f.WriteString(host); err != nil { + return "", err + } - return collectionInterval -} + if slots > 0 { + _, err := f.WriteString(fmt.Sprintf(" slots=%d", slots)) + if err != nil { + return "", err + } + } -func getDcpProgressInterval(config *corev1.ConfigMap) int { - progressInterval := configMapDefaultDcpProgInterval + if maxSlots > 0 { + _, err := f.WriteString(fmt.Sprintf(" max_slots=%d", maxSlots)) + if err != nil { + return "", err + } + } - // DCP Progress Output Interval (int, in seconds) - if len(config.Data[configMapKeyDcpProgInterval]) > 0 { - if parsedInterval, err := time.ParseDuration(config.Data[configMapKeyDcpProgInterval]); err == nil { - progressInterval = parsedInterval + _, err = f.WriteString("\n") + if err != nil { + return "", err } } - // dcp progress needs to be in seconds - round and cast to int - progressInterval = progressInterval.Round(1 * time.Second) - return int(progressInterval / time.Second) + return hostfilePath, nil } -func getNumProcesses(config *corev1.ConfigMap) int { - numProcesses := configMapDefaultNumProcess +// Get the first line of the hostfile for verification +func peekMpiHostfile(hostfile string) string { + file, err := os.Open(hostfile) + if err != nil { + return "" + } + defer file.Close() - if len(config.Data[configMapKeyNumProcesses]) > 0 { - if parsedNum, err := strconv.Atoi(config.Data[configMapKeyNumProcesses]); err == nil { - numProcesses = parsedNum - } + str, err := bufio.NewReader(file).ReadString('\n') + if err != nil { + return "" } - return numProcesses + return str } func progressCollectionEnabled(collectInterval time.Duration) bool { diff --git a/controllers/datamovement_controller_test.go b/controllers/datamovement_controller_test.go index 8346fecc..07d82335 100644 --- a/controllers/datamovement_controller_test.go +++ b/controllers/datamovement_controller_test.go @@ -27,6 +27,7 @@ import ( "path/filepath" "time" + 
"github.com/NearNodeFlash/nnf-dm/api/v1alpha1" nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" "github.com/google/uuid" . "github.com/onsi/ginkgo/v2" @@ -35,10 +36,10 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/yaml" ) // This is dumped into a temporary file and then ran as a bash script. @@ -46,554 +47,571 @@ import ( //go:embed fakeProgressOutput.sh var progressOutputScript string -var _ = Describe("Data Movement Test" /*Ordered, (Ginkgo v2)*/, func() { - var dm *nnfv1alpha1.NnfDataMovement - var cm *corev1.ConfigMap - createCm := true - var tmpDir string - var srcPath string - var destPath string - const testLabelKey = "dm-test" - var testLabel string - - createFakeProgressScript := func() string { - f, err := os.CreateTemp(tmpDir, "fakeProgressOutput-*.sh") - Expect(err).ToNot(HaveOccurred()) - filePath := f.Name() - - bytes := []byte(progressOutputScript) - _, err = f.Write(bytes) - Expect(err).ToNot(HaveOccurred()) - - err = os.Chmod(filePath, 0755) - Expect(err).ToNot(HaveOccurred()) - - return filePath - } - - BeforeEach(func() { - var err error - createCm = true - testLabel = fmt.Sprintf("%s-%s", testLabelKey, uuid.NewString()) - - tmpDir, err = os.MkdirTemp("/tmp", "dm-test") - Expect(err).ToNot(HaveOccurred()) - - srcPath = filepath.Join(tmpDir, "src") - destPath = filepath.Join(tmpDir, "dest") - - cm = &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: configMapName, - Namespace: configMapNamespace, - Labels: map[string]string{ - testLabelKey: testLabel, - }, - }, - Data: map[string]string{ - // Use a command that will pass instead of using the default of mpirun - which will fail. - // This is to ensure that our tests are less noisy on the output, as the mpirun will produce - // an error in the output. 
- configMapKeyCmd: "true", - configMapKeyProgInterval: "1s", - configMapKeyDcpProgInterval: "1s", - configMapKeyNumProcesses: "", - }, - } +var defaultCommand = "mpirun --allow-run-as-root --hostfile $HOSTFILE dcp --progress 1 --uid $UID --gid $GID $SRC $DEST" - dm = &nnfv1alpha1.NnfDataMovement{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dm-test", - Namespace: corev1.NamespaceDefault, - Labels: map[string]string{ - testLabelKey: testLabel, - }, - }, - Spec: nnfv1alpha1.NnfDataMovementSpec{ - Source: &nnfv1alpha1.NnfDataMovementSpecSourceDestination{ - Path: srcPath, - }, - Destination: &nnfv1alpha1.NnfDataMovementSpecSourceDestination{ - Path: destPath, - }, - UserId: 0, - GroupId: 0, - Cancel: false, - }, - } - }) +var _ = Describe("Data Movement Test", func() { - JustBeforeEach(func() { - // Create CM and verify label - if createCm { - Expect(k8sClient.Create(context.TODO(), cm)).To(Succeed()) - Eventually(func(g Gomega) string { - g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(cm), cm)).To(Succeed()) - return cm.Labels[testLabelKey] - }).Should(Equal(testLabel)) - } + Describe("Reconciler Tests", func() { + var dm *nnfv1alpha1.NnfDataMovement + var cm *corev1.ConfigMap + var dmCfg *dmConfig + var dmCfgProfile dmConfigProfile + createCm := true + var tmpDir string + var srcPath string + var destPath string + const testLabelKey = "dm-test" + var testLabel string - // Create DM and verify label - Expect(k8sClient.Create(context.TODO(), dm)).To(Succeed()) - Eventually(func(g Gomega) string { - g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) - return dm.Labels[testLabelKey] - }).Should(Equal(testLabel)) + createFakeProgressScript := func() string { + f, err := os.CreateTemp(tmpDir, "fakeProgressOutput-*.sh") + Expect(err).ToNot(HaveOccurred()) + filePath := f.Name() - }) + bytes := []byte(progressOutputScript) + _, err = f.Write(bytes) + Expect(err).ToNot(HaveOccurred()) - AfterEach(func() { - // Remove datamovement - if dm != nil { - Expect(k8sClient.Delete(context.TODO(), dm)).To(Succeed()) - Eventually(func() error { - return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm) - }).ShouldNot(Succeed()) - } + err = os.Chmod(filePath, 0755) + Expect(err).ToNot(HaveOccurred()) - // Remove configmap - if createCm { - Expect(k8sClient.Delete(context.TODO(), cm)).To(Succeed()) - Eventually(func() error { - return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(cm), cm) - }).ShouldNot(Succeed()) + return filePath } - // Delete tmpdir and its contents - Expect(os.RemoveAll(tmpDir)).To(Succeed()) - }) - - Context("when a data movement operation succeeds", func() { - It("should have a state and status of 'Finished' and 'Success'", func() { - Eventually(func(g Gomega) nnfv1alpha1.NnfDataMovementStatus { - g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) - return dm.Status - }, "3s").Should(MatchFields(IgnoreExtras, Fields{ - "State": Equal(nnfv1alpha1.DataMovementConditionTypeFinished), - "Status": Equal(nnfv1alpha1.DataMovementConditionReasonSuccess), - })) - }) - }) - - Context("when the dm configmap has specified a dmCommand", func() { BeforeEach(func() { - cm.Data[configMapKeyCmd] = "/bin/ls -l" - }) - It("should use that command instead of the default mpirun", func() { - Eventually(func(g Gomega) string { - cmd := "" - g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) - if dm.Status.CommandStatus != nil { - cmd = 
dm.Status.CommandStatus.Command - } - return cmd - }).Should(Equal(cm.Data[configMapKeyCmd])) - }) - }) - - Context("when the dm config map has specified a valid dmNumProcesses and the dm namespace matches NNF_NODE_NAME", func() { - BeforeEach(func() { - cm.Data[configMapKeyCmd] = "" - cm.Data[configMapKeyNumProcesses] = "17" - os.Setenv("NNF_NODE_NAME", "default") // normally this isn't default, but we just want it to match - }) - - It("should use that number for the -np flag", func() { - Eventually(func(g Gomega) string { - cmd := "" - g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) - if dm.Status.CommandStatus != nil { - cmd = dm.Status.CommandStatus.Command - } - return cmd - }).Should(Equal(fmt.Sprintf( - "mpirun --allow-run-as-root -np 17 --host localhost dcp --progress 1 --uid 0 --gid 0 %s %s", srcPath, destPath))) - }) - - AfterEach(func() { - os.Unsetenv("NNF_NODE_NAME") - }) - }) + var err error + createCm = true + testLabel = fmt.Sprintf("%s-%s", testLabelKey, uuid.NewString()) - Context("when the dm config map has specified a valid dmNumProcesses and the dm namespace does not match NNF_NODE_NAME", func() { - BeforeEach(func() { - cm.Data[configMapKeyCmd] = "" - cm.Data[configMapKeyNumProcesses] = "17" - }) - - It("should use len(hosts) (1) for the -np flag", func() { - Eventually(func(g Gomega) string { - cmd := "" - g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) - if dm.Status.CommandStatus != nil { - cmd = dm.Status.CommandStatus.Command - } - return cmd - }).Should(Equal(fmt.Sprintf( - "mpirun --allow-run-as-root -np 1 --host localhost dcp --progress 1 --uid 0 --gid 0 %s %s", srcPath, destPath))) - }) - }) - - Context("when the dm config map has specified a invalid dmNumProcesses", func() { - BeforeEach(func() { - cm.Data[configMapKeyCmd] = "" - cm.Data[configMapKeyNumProcesses] = "aa" - }) - - It("should use the default number for -np flag", func() { - Eventually(func(g Gomega) string { - cmd := "" - g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) - if dm.Status.CommandStatus != nil { - cmd = dm.Status.CommandStatus.Command - } - return cmd - }).Should(Equal(fmt.Sprintf( - "mpirun --allow-run-as-root -np 1 --host localhost dcp --progress 1 --uid 0 --gid 0 %s %s", srcPath, destPath))) - }) - }) - - DescribeTable("Parsing values from the dm config map for dmProgressInterval", - func(durStr string, dur time.Duration) { - configMap := &corev1.ConfigMap{} - Expect(k8sClient.Get(ctx, types.NamespacedName{Name: configMapName, Namespace: configMapNamespace}, configMap)).To(Succeed()) - // For this test, we don't have a direct way to verify the progress interval, so use getCollectionInterval directly - // instead of verifying full DM behavior. 
- configMap.Data[configMapKeyProgInterval] = durStr - dmProgressInterval := getCollectionInterval(configMap) - Expect(dmProgressInterval).To(Equal(dur)) - }, - Entry("when dmProgressInterval is valid", "25s", 25*time.Second), - Entry("when dmProgressInterval is invalid", "aafdafdsa", configMapDefaultProgInterval), - Entry("when dmProgressInterval is empty", "", configMapDefaultProgInterval), - ) - - Context("when the dm config map has specified a dmProgressInterval of less than 1s", func() { - BeforeEach(func() { - cm.Data[configMapKeyCmd] = "sleep .5" - cm.Data[configMapKeyProgInterval] = "500ms" - }) - - It("the data movement should skip progress collection", func() { - - By("completing the data movement successfully") - Eventually(func(g Gomega) nnfv1alpha1.NnfDataMovementStatus { - g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) - return dm.Status - }, "3s").Should(MatchFields(IgnoreExtras, Fields{ - "State": Equal(nnfv1alpha1.DataMovementConditionTypeFinished), - "Status": Equal(nnfv1alpha1.DataMovementConditionReasonSuccess), - })) - - By("verify that CommandStatus is empty") - Expect(dm.Status.CommandStatus.ProgressPercentage).To(BeNil()) - Expect(dm.Status.CommandStatus.LastMessage).To(BeEmpty()) - }) - }) - - Context("when the dm config map has specified a valid dcpProgressInterval", func() { - BeforeEach(func() { - cm.Data[configMapKeyCmd] = "" - cm.Data[configMapKeyDcpProgInterval] = "7.12s" - }) - - It("should use that number for the dcp --progress option after rounding", func() { - Eventually(func(g Gomega) string { - cmd := "" - g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) - if dm.Status.CommandStatus != nil { - cmd = dm.Status.CommandStatus.Command - } - return cmd - }).Should(Equal(fmt.Sprintf( - "mpirun --allow-run-as-root -np 1 --host localhost dcp --progress 7 --uid 0 --gid 0 %s %s", srcPath, destPath))) - }) - }) + tmpDir, err = os.MkdirTemp("/tmp", "dm-test") + Expect(err).ToNot(HaveOccurred()) - Context("when the dm config map has specified an invalid dcpProgressInterval", func() { - BeforeEach(func() { - cm.Data[configMapKeyCmd] = "" - cm.Data[configMapKeyDcpProgInterval] = "aaa" - }) + srcPath = filepath.Join(tmpDir, "src") + destPath = filepath.Join(tmpDir, "dest") - It("should use the default number for the dcp --progress option", func() { - Eventually(func(g Gomega) string { - cmd := "" - g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) - if dm.Status.CommandStatus != nil { - cmd = dm.Status.CommandStatus.Command + // Ensure the DM namespace exists for the ConfigMap. Don't check the result because we + // can only create this once. Since this is an unordered container, we cannot use + // BeforeAll. Ignoring a Create() error is fine in this case. 
+ ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: v1alpha1.DataMovementNamespace, + }, + } + k8sClient.Create(context.TODO(), ns) + + // Default config map data + dmCfg = &dmConfig{ + Profiles: map[string]dmConfigProfile{ + "default": { + Command: defaultCommand, + }, + }, + ProgressIntervalSeconds: 1, + } + dmCfgProfile = dmCfg.Profiles[configMapKeyProfileDefault] + + dm = &nnfv1alpha1.NnfDataMovement{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dm-test", + Namespace: corev1.NamespaceDefault, + Labels: map[string]string{ + testLabelKey: testLabel, + }, + }, + Spec: nnfv1alpha1.NnfDataMovementSpec{ + Source: &nnfv1alpha1.NnfDataMovementSpecSourceDestination{ + Path: srcPath, + }, + Destination: &nnfv1alpha1.NnfDataMovementSpecSourceDestination{ + Path: destPath, + }, + UserId: 0, + GroupId: 0, + Cancel: false, + }, + } + }) + + JustBeforeEach(func() { + // Create CM and verify label + if createCm { + // allow test to override the values in the default cfg profile + dmCfg.Profiles[configMapKeyProfileDefault] = dmCfgProfile + + // Convert the config to raw + b, err := yaml.Marshal(dmCfg) + Expect(err).ToNot(HaveOccurred()) + + cm = &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: configMapName, + Namespace: v1alpha1.DataMovementNamespace, + Labels: map[string]string{ + testLabelKey: testLabel, + }, + }, + Data: map[string]string{ + configMapKeyData: string(b), + }, } - return cmd - }).Should(Equal(fmt.Sprintf( - "mpirun --allow-run-as-root -np 1 --host localhost dcp --progress 1 --uid 0 --gid 0 %s %s", srcPath, destPath))) - }) - }) - Context("when the dm config map has specified an empty dcpProgressInterval", func() { - BeforeEach(func() { - cm.Data[configMapKeyCmd] = "" - cm.Data[configMapKeyDcpProgInterval] = "" - }) + Expect(k8sClient.Create(context.TODO(), cm)).To(Succeed()) + Eventually(func(g Gomega) string { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(cm), cm)).To(Succeed()) + return cm.Labels[testLabelKey] + }).Should(Equal(testLabel)) + } - It("should use the default number for the dcp --progress option", func() { + // Create DM and verify label + Expect(k8sClient.Create(context.TODO(), dm)).To(Succeed()) Eventually(func(g Gomega) string { - cmd := "" g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) - if dm.Status.CommandStatus != nil { - cmd = dm.Status.CommandStatus.Command - } - return cmd - }).Should(Equal(fmt.Sprintf( - "mpirun --allow-run-as-root -np 1 --host localhost dcp --progress 1 --uid 0 --gid 0 %s %s", srcPath, destPath))) - }) - }) - - Context("when there is no dm config map", func() { - BeforeEach(func() { - createCm = false - }) - - It("should requeue and not start running", func() { - Consistently(func(g Gomega) string { - g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) - return dm.Status.State - }).ShouldNot(Equal(nnfv1alpha1.DataMovementConditionTypeRunning)) - }) - }) - - Context("when a data movement command fails", func() { - BeforeEach(func() { - cm.Data[configMapKeyCmd] = "false" - }) - It("should have a State/Status of 'Finished'/'Failed'", func() { - Eventually(func(g Gomega) nnfv1alpha1.NnfDataMovementStatus { - g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) - return dm.Status - }).Should(MatchFields(IgnoreExtras, Fields{ - "State": Equal(nnfv1alpha1.DataMovementConditionTypeFinished), - "Status": Equal(nnfv1alpha1.DataMovementConditionReasonFailed), - })) - }) - }) - - 
Context("when a data movement is cancelled before it started", func() { - finalizer := "dm-test" + return dm.Labels[testLabelKey] + }).Should(Equal(testLabel)) - BeforeEach(func() { - // Set cancel on creation so that data movement doesn't start - dm.Spec.Cancel = true - cm.Data[configMapKeyCmd] = "" // try to use mpirun dcp, it will fail }) - It("should have a State/Status of 'Finished'/'Cancelled' and StartTime/EndTime should be set to now", func() { - nowMicro := metav1.NowMicro() - - By("Adding a finalizer to make sure the data movement stays around after delete") - Expect(retry.RetryOnConflict(retry.DefaultRetry, func() error { - k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm) + AfterEach(func() { + // Remove datamovement + if dm != nil { + Expect(k8sClient.Delete(context.TODO(), dm)).To(Succeed()) + Eventually(func() error { + return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm) + }).ShouldNot(Succeed()) + } + + // Remove configmap + if createCm { + Expect(k8sClient.Delete(context.TODO(), cm)).To(Succeed()) + Eventually(func() error { + return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(cm), cm) + }).ShouldNot(Succeed()) + } + + // Delete tmpdir and its contents + Expect(os.RemoveAll(tmpDir)).To(Succeed()) + }) + + Context("when a data movement operation succeeds", func() { + BeforeEach(func() { + dmCfgProfile.Command = "sleep 1" + }) + It("should have a state and status of 'Finished' and 'Success'", func() { + Eventually(func(g Gomega) nnfv1alpha1.NnfDataMovementStatus { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) + return dm.Status + }, "3s").Should(MatchFields(IgnoreExtras, Fields{ + "State": Equal(nnfv1alpha1.DataMovementConditionTypeFinished), + "Status": Equal(nnfv1alpha1.DataMovementConditionReasonSuccess), + })) + }) + }) + + Context("when a data movement command has no progress output", func() { + BeforeEach(func() { + dmCfgProfile.Command = "sleep 1" + }) + It("CommandStatus should not have a ProgressPercentage", func() { + Eventually(func(g Gomega) nnfv1alpha1.NnfDataMovementStatus { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) + return dm.Status + }, "3s").Should(MatchFields(IgnoreExtras, Fields{ + "State": Equal(nnfv1alpha1.DataMovementConditionTypeFinished), + "Status": Equal(nnfv1alpha1.DataMovementConditionReasonSuccess), + })) + + Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) + Expect(dm.Status.CommandStatus.ProgressPercentage).To(BeNil()) + }) + }) + + Context("when the dm configmap has specified an overrideCmd", func() { + BeforeEach(func() { + dmCfgProfile.Command = "/bin/ls -l" + }) + It("should use that command instead of the default mpirun", func() { + Eventually(func(g Gomega) string { + cmd := "" + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) + if dm.Status.CommandStatus != nil { + cmd = dm.Status.CommandStatus.Command + } + return cmd + }).Should(Equal(dmCfgProfile.Command)) + }) + }) + + Context("when the dm configmap does not have $HOSTFILE in the command", func() { + BeforeEach(func() { + dmCfgProfile.Command = "/bin/ls -l" + }) + It("should use that command instead of the default mpirun", func() { + Eventually(func(g Gomega) string { + cmd := "" + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) + if dm.Status.CommandStatus != nil { + cmd = dm.Status.CommandStatus.Command + } + return 
cmd + }).Should(Equal(dmCfgProfile.Command)) + }) + }) + + Context("when the dm config map has specified a dmProgressInterval of less than 1s", func() { + BeforeEach(func() { + dmCfgProfile.Command = "sleep .5" + dmCfg.ProgressIntervalSeconds = 0 + }) + + It("the data movement should skip progress collection", func() { + + By("completing the data movement successfully") + Eventually(func(g Gomega) nnfv1alpha1.NnfDataMovementStatus { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) + return dm.Status + }, "3s").Should(MatchFields(IgnoreExtras, Fields{ + "State": Equal(nnfv1alpha1.DataMovementConditionTypeFinished), + "Status": Equal(nnfv1alpha1.DataMovementConditionReasonSuccess), + })) + + By("verify that CommandStatus is empty") + Expect(dm.Status.CommandStatus.ProgressPercentage).To(BeNil()) + Expect(dm.Status.CommandStatus.LastMessage).To(BeEmpty()) + }) + }) + + Context("when there is no dm config map", func() { + BeforeEach(func() { + createCm = false + }) + + It("should requeue and not start running", func() { + Consistently(func(g Gomega) string { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) + return dm.Status.State + }).ShouldNot(Equal(nnfv1alpha1.DataMovementConditionTypeRunning)) + }) + }) + + Context("when a data movement command fails", func() { + BeforeEach(func() { + dmCfgProfile.Command = "false" + }) + It("should have a State/Status of 'Finished'/'Failed'", func() { + Eventually(func(g Gomega) nnfv1alpha1.NnfDataMovementStatus { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) + return dm.Status + }).Should(MatchFields(IgnoreExtras, Fields{ + "State": Equal(nnfv1alpha1.DataMovementConditionTypeFinished), + "Status": Equal(nnfv1alpha1.DataMovementConditionReasonFailed), + })) + }) + }) + + Context("when a data movement is cancelled before it started", func() { + finalizer := "dm-test" + + BeforeEach(func() { + // Set cancel on creation so that data movement doesn't start dm.Spec.Cancel = true - dm.Finalizers = append(dm.Finalizers, finalizer) - return k8sClient.Update(context.TODO(), dm) - })).To(Succeed()) - - By("Attempting to delete the data movement so we get a DeletionTimestamp") - Expect(k8sClient.Delete(context.TODO(), dm)).To(Succeed()) - Eventually(func(g Gomega) *metav1.Time { - k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm) - return dm.DeletionTimestamp - }, "10s").ShouldNot(BeNil()) - - By("Checking the Status fields") - Eventually(func(g Gomega) nnfv1alpha1.NnfDataMovementStatus { - g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) - return dm.Status - }, "10s").Should(MatchFields(IgnoreExtras, Fields{ - "State": Equal(nnfv1alpha1.DataMovementConditionTypeFinished), - "Status": Equal(nnfv1alpha1.DataMovementConditionReasonCancelled), - "StartTime": HaveField("Time", BeTemporally(">", nowMicro.Time)), - "EndTime": HaveField("Time", BeTemporally(">", nowMicro.Time)), - })) - }) - - JustAfterEach(func() { - // Remove the finalizer to ensure actual deletion - Eventually(func(g Gomega) bool { - k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm) - if controllerutil.ContainsFinalizer(dm, finalizer) { - controllerutil.RemoveFinalizer(dm, finalizer) - } - k8sClient.Update(context.TODO(), dm) - k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm) - return controllerutil.ContainsFinalizer(dm, finalizer) - }).Should(BeFalse()) - - dm = nil + 
dmCfgProfile.Command = "false" + }) + + It("should have a State/Status of 'Finished'/'Cancelled' and StartTime/EndTime should be set to now", func() { + nowMicro := metav1.NowMicro() + + By("Adding a finalizer to make sure the data movement stays around after delete") + Expect(retry.RetryOnConflict(retry.DefaultRetry, func() error { + k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm) + dm.Spec.Cancel = true + dm.Finalizers = append(dm.Finalizers, finalizer) + return k8sClient.Update(context.TODO(), dm) + })).To(Succeed()) + + By("Attempting to delete the data movement so we get a DeletionTimestamp") + Expect(k8sClient.Delete(context.TODO(), dm)).To(Succeed()) + Eventually(func(g Gomega) *metav1.Time { + k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm) + return dm.DeletionTimestamp + }, "10s").ShouldNot(BeNil()) + + By("Checking the Status fields") + Eventually(func(g Gomega) nnfv1alpha1.NnfDataMovementStatus { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) + return dm.Status + }, "10s").Should(MatchFields(IgnoreExtras, Fields{ + "State": Equal(nnfv1alpha1.DataMovementConditionTypeFinished), + "Status": Equal(nnfv1alpha1.DataMovementConditionReasonCancelled), + "StartTime": HaveField("Time", BeTemporally(">", nowMicro.Time)), + "EndTime": HaveField("Time", BeTemporally(">", nowMicro.Time)), + })) + }) + + JustAfterEach(func() { + // Remove the finalizer to ensure actual deletion + Eventually(func(g Gomega) bool { + k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm) + if controllerutil.ContainsFinalizer(dm, finalizer) { + controllerutil.RemoveFinalizer(dm, finalizer) + } + k8sClient.Update(context.TODO(), dm) + k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm) + return controllerutil.ContainsFinalizer(dm, finalizer) + }).Should(BeFalse()) + + dm = nil + }) + }) + + Context("when a data movement operation is running", func() { + var commandWithArgs string + commandDuration := 5 // total duration of the fake output script + commandIntervalInSec := 1.0 // interval to produce output + + BeforeEach(func() { + scriptFilePath := createFakeProgressScript() + _, err := os.Stat(scriptFilePath) + Expect(err).ToNot(HaveOccurred()) + + commandWithArgs = fmt.Sprintf("/bin/bash %s %d %f", scriptFilePath, commandDuration, commandIntervalInSec) + dmCfgProfile.Command = commandWithArgs + }) + + It("should update progress by parsing the output of the command", func() { + startTime := metav1.NowMicro() + + Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(cm), cm)).To(Succeed()) + Expect(dmCfgProfile.Command).To(Equal(commandWithArgs)) + + By("ensuring that we do not have a progress to start") + Eventually(func(g Gomega) *int32 { + var progress *int32 + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) + if dm.Status.CommandStatus != nil { + progress = dm.Status.CommandStatus.ProgressPercentage + } + return progress + }).Should(BeNil()) + + By("ensuring that we extract a progress value between 0 and 100 (i.e. 
10)") + Eventually(func(g Gomega) *int32 { + var progress *int32 + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) + if dm.Status.CommandStatus != nil && dm.Status.CommandStatus.ProgressPercentage != nil { + progress = dm.Status.CommandStatus.ProgressPercentage + } + return progress + }, commandIntervalInSec*3).Should(SatisfyAll( + HaveValue(BeNumerically(">", 0)), + HaveValue(BeNumerically("<", 100)), + )) + + By("getting a progress of 100") + Eventually(func(g Gomega) *int32 { + var progress *int32 + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) + if dm.Status.CommandStatus != nil && dm.Status.CommandStatus.ProgressPercentage != nil { + progress = dm.Status.CommandStatus.ProgressPercentage + } + return progress + }, commandDuration).Should(HaveValue(Equal(int32(100)))) + + endTime := metav1.NowMicro() + elapsedTime := time.Since(startTime.Time) + + By("ensuring state/status of 'Finished'/'Success'") + Eventually(func(g Gomega) nnfv1alpha1.NnfDataMovementStatus { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) + return dm.Status + }, commandIntervalInSec*3).Should(MatchFields(IgnoreExtras, Fields{ + "State": Equal(nnfv1alpha1.DataMovementConditionTypeFinished), + "Status": Equal(nnfv1alpha1.DataMovementConditionReasonSuccess), + })) + + By("ensuring the LastMessage contains the 100% done output") + Expect(dm.Status.CommandStatus.LastMessage).To(SatisfyAll( + ContainSubstring("100%"), + ContainSubstring("done"), + )) + + By("ensuring that the LastMessageTime is accurate") + Expect(dm.Status.CommandStatus.LastMessageTime.Time).To(SatisfyAll( + BeTemporally(">", startTime.Time), + BeTemporally("<=", endTime.Time), + )) + + By("ensuring that ElaspedTime is accurate") + Expect(dm.Status.CommandStatus.ElapsedTime.Duration).To(SatisfyAll( + BeNumerically(">", commandDuration/2), + BeNumerically("<=", elapsedTime), + )) + + GinkgoWriter.Printf("VERIFY: LastMessage: %s\n", dm.Status.CommandStatus.LastMessage) + GinkgoWriter.Printf("VERIFY: LastMessageTime: %v\n", dm.Status.CommandStatus.LastMessageTime) + GinkgoWriter.Printf("VERIFY: ElapsedTime: %v\n", dm.Status.CommandStatus.ElapsedTime) + }) + }) + + Context("when there are multiple lines of progress output to be read", func() { + BeforeEach(func() { + scriptFilePath := createFakeProgressScript() + _, err := os.Stat(scriptFilePath) + Expect(err).ToNot(HaveOccurred()) + + dmCfgProfile.Command = fmt.Sprintf("/bin/bash %s 10 .25", scriptFilePath) + dmCfg.ProgressIntervalSeconds = 1 + }) + + It("LastMessage should not include multiple lines of output", func() { + Consistently(func(g Gomega) string { + lastMessage := "" + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) + if dm.Status.CommandStatus != nil { + lastMessage = dm.Status.CommandStatus.LastMessage + } + return lastMessage + }).ShouldNot(SatisfyAll( + BeEmpty(), + ContainSubstring("\n"), + )) + }) + }) + + Context("when a data movement operation is cancelled", func() { + BeforeEach(func() { + dmCfgProfile.Command = "sleep 5" + }) + It("should have a state and status of 'Finished' and 'Cancelled'", func() { + By("ensuring the data movement started") + Eventually(func(g Gomega) string { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) + return dm.Status.State + }).Should(Equal(nnfv1alpha1.DataMovementConditionTypeRunning)) + + By("setting the cancel flag to true") + 
Eventually(func(g Gomega) error { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) + dm.Spec.Cancel = true + return k8sClient.Update(context.TODO(), dm) + }).Should(Succeed()) + + By("verifying that it was cancelled successfully") + Eventually(func(g Gomega) nnfv1alpha1.NnfDataMovementStatus { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) + return dm.Status + }).Should(MatchFields(IgnoreExtras, Fields{ + "State": Equal(nnfv1alpha1.DataMovementConditionTypeFinished), + "Status": Equal(nnfv1alpha1.DataMovementConditionReasonCancelled), + })) + }) + }) + + Context("when the data movement operation has a set uid and gid", func() { + expectedGid := uint32(1000) + expectedUid := uint32(2000) + + BeforeEach(func() { + dm.Spec.GroupId = expectedGid + dm.Spec.UserId = expectedUid + }) + It("should use those in the data movement command", func() { + Eventually(func(g Gomega) string { + cmd := "" + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) + if dm.Status.CommandStatus != nil { + cmd = dm.Status.CommandStatus.Command + } + return cmd + }).Should(MatchRegexp(fmt.Sprintf( + "mpirun --allow-run-as-root --hostfile (.+)/([^/]+) dcp --progress 1 --uid %d --gid %d %s %s", + expectedUid, expectedGid, srcPath, destPath))) + }) }) }) - Context("when a data movement operation is running", func() { - var commandWithArgs string - commandDuration := 5 // total duration of the fake output script - commandIntervalInSec := 1.0 // interval to produce output - - BeforeEach(func() { - scriptFilePath := createFakeProgressScript() - _, err := os.Stat(scriptFilePath) - Expect(err).ToNot(HaveOccurred()) - - commandWithArgs = fmt.Sprintf("/bin/bash %s %d %f", scriptFilePath, commandDuration, commandIntervalInSec) - cm.Data[configMapKeyCmd] = commandWithArgs - }) - - It("should update progress by parsing the output of the command", func() { - startTime := metav1.NowMicro() - - Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(cm), cm)).To(Succeed()) - Expect(cm.Data[configMapKeyCmd]).To(Equal(commandWithArgs)) - - By("ensuring that we do not have a progress to start") - Eventually(func(g Gomega) *int32 { - var progress *int32 - g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) - if dm.Status.CommandStatus != nil { - progress = dm.Status.CommandStatus.ProgressPercentage - } - return progress - }).Should(BeNil()) - - By("ensuring that we extract a progress value between 0 and 100 (i.e. 
10)") - Eventually(func(g Gomega) *int32 { - var progress *int32 - g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) - if dm.Status.CommandStatus != nil && dm.Status.CommandStatus.ProgressPercentage != nil { - progress = dm.Status.CommandStatus.ProgressPercentage - } - return progress - }, commandIntervalInSec*3).Should(SatisfyAll( - HaveValue(BeNumerically(">", 0)), - HaveValue(BeNumerically("<", 100)), - )) - - By("getting a progress of 100") - Eventually(func(g Gomega) *int32 { - var progress *int32 - g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) - if dm.Status.CommandStatus != nil && dm.Status.CommandStatus.ProgressPercentage != nil { - progress = dm.Status.CommandStatus.ProgressPercentage - } - return progress - }, commandDuration).Should(HaveValue(Equal(int32(100)))) - - endTime := metav1.NowMicro() - elapsedTime := time.Since(startTime.Time) - - By("ensuring state/status of 'Finished'/'Success'") - Eventually(func(g Gomega) nnfv1alpha1.NnfDataMovementStatus { - g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) - return dm.Status - }, commandIntervalInSec*3).Should(MatchFields(IgnoreExtras, Fields{ - "State": Equal(nnfv1alpha1.DataMovementConditionTypeFinished), - "Status": Equal(nnfv1alpha1.DataMovementConditionReasonSuccess), - })) - - By("ensuring the LastMessage contains the 100% done output") - Expect(dm.Status.CommandStatus.LastMessage).To(SatisfyAll( - ContainSubstring("100%"), - ContainSubstring("done"), - )) - - By("ensuring that the LastMessageTime is accurate") - Expect(dm.Status.CommandStatus.LastMessageTime.Time).To(SatisfyAll( - BeTemporally(">", startTime.Time), - BeTemporally("<=", endTime.Time), - )) - - By("ensuring that ElaspedTime is accurate") - Expect(dm.Status.CommandStatus.ElapsedTime.Duration).To(SatisfyAll( - BeNumerically(">", commandDuration/2), - BeNumerically("<=", elapsedTime), - )) - - GinkgoWriter.Printf("VERIFY: LastMessage: %s\n", dm.Status.CommandStatus.LastMessage) - GinkgoWriter.Printf("VERIFY: LastMessageTime: %v\n", dm.Status.CommandStatus.LastMessageTime) - GinkgoWriter.Printf("VERIFY: ElapsedTime: %v\n", dm.Status.CommandStatus.ElapsedTime) - }) - }) - - Context("when there are multiple lines of progress output to be read", func() { - BeforeEach(func() { - scriptFilePath := createFakeProgressScript() - _, err := os.Stat(scriptFilePath) - Expect(err).ToNot(HaveOccurred()) - - cm.Data[configMapKeyCmd] = fmt.Sprintf("/bin/bash %s 10 .25", scriptFilePath) - cm.Data[configMapKeyProgInterval] = "1s" - }) - - It("LastMessage should not include multiple lines of output", func() { - Consistently(func(g Gomega) string { - lastMessage := "" - g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) - if dm.Status.CommandStatus != nil { - lastMessage = dm.Status.CommandStatus.LastMessage - } - return lastMessage - }).ShouldNot(SatisfyAll( - BeEmpty(), - ContainSubstring("\n"), - )) - }) - }) - - Context("when a data movement operation is cancelled", func() { - BeforeEach(func() { - cm.Data[configMapKeyCmd] = "sleep 5" - }) - It("should have a state and status of 'Finished' and 'Cancelled'", func() { - By("ensuring the data movement started") - Eventually(func(g Gomega) string { - g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) - return dm.Status.State - }).Should(Equal(nnfv1alpha1.DataMovementConditionTypeRunning)) - - By("setting the cancel flag to true") - 
Eventually(func(g Gomega) error {
-				g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed())
-				dm.Spec.Cancel = true
-				return k8sClient.Update(context.TODO(), dm)
-			}).Should(Succeed())
-
-			By("verifying that it was cancelled successfully")
-			Eventually(func(g Gomega) nnfv1alpha1.NnfDataMovementStatus {
-				g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed())
-				return dm.Status
-			}).Should(MatchFields(IgnoreExtras, Fields{
-				"State": Equal(nnfv1alpha1.DataMovementConditionTypeFinished),
-				"Status": Equal(nnfv1alpha1.DataMovementConditionReasonCancelled),
-			}))
		})
	})

+	// These tests are for unit testing functions without the overhead of creating DM objects and allow
+	// us to make better use of DescribeTable().
+	Describe("Unit Tests (without Reconciler)", func() {
+
+		Context("MPI hostfile creation", func() {
+			hosts := []string{"node1", "node2"}
+			DescribeTable("setting hosts, slots, and maxSlots",
+				func(hosts []string, slots, maxSlots int, expected string) {
+					hostfilePath, err := createMpiHostfile("my-dm", hosts, slots, maxSlots)
+					defer os.RemoveAll(filepath.Dir(hostfilePath))
+					Expect(err).To(BeNil())
+					Expect(hostfilePath).ToNot(Equal(""))
+
+					contents, err := os.ReadFile(hostfilePath)
+					Expect(err).To(BeNil())
+					Expect(string(contents)).To(Equal(expected))
+				},
+				Entry("with no slots or max_slots", hosts, 0, 0, "node1\nnode2\n"),
+				Entry("with only slots", hosts, 4, 0, "node1 slots=4\nnode2 slots=4\n"),
+				Entry("with only max_slots", hosts, 0, 4, "node1 max_slots=4\nnode2 max_slots=4\n"),
+				Entry("with both slots and max_slots", hosts, 4, 4, "node1 slots=4 max_slots=4\nnode2 slots=4 max_slots=4\n"),
+			)
+		})
+
+		Context("peekMpiHostfile", func() {
+			It("should return the first line of the hostfile", func() {
+				hosts := []string{"one", "two", "three"}
+				slots, maxSlots := 16, 32
+				hostfilePath, err := createMpiHostfile("my-dm", hosts, slots, maxSlots)
+				Expect(err).ToNot(HaveOccurred())
+
+				actual := peekMpiHostfile(hostfilePath)
+				Expect(actual).To(Equal("one slots=16 max_slots=32\n"))
+			})
+		})
+
+		Context("$HOSTFILE creation", func() {
+			hosts := []string{"one", "two", "three"}
+			dm := nnfv1alpha1.NnfDataMovement{
+				Spec: nnfv1alpha1.NnfDataMovementSpec{
+					UserId: 1000,
+					GroupId: 2000,
+					Source: &nnfv1alpha1.NnfDataMovementSpecSourceDestination{
+						Path: "/src/",
+					},
+					Destination: &nnfv1alpha1.NnfDataMovementSpecSourceDestination{
+						Path: "/dest/",
+					},
+				},
+			}
+			When("$HOSTFILE is present", func() {
+				It("should create the hostfile", func() {
+					profile := dmConfigProfile{
+						Command: "mpirun --hostfile $HOSTFILE dcp src dest",
+					}
+
+					cmd, hostfile, err := buildDMCommand(profile, hosts, &dm)
+					Expect(err).ToNot(HaveOccurred())
+					Expect(len(hostfile)).Should((BeNumerically(">", 0)))
+					Expect(cmd).ToNot(BeEmpty())
+					info, err := os.Stat(hostfile)
+					Expect(err).ToNot(HaveOccurred())
+					Expect(info).ToNot(BeNil())
+				})
+
+			})
+			When("$HOSTFILE is not present", func() {
+				It("should not create the hostfile", func() {
+					profile := dmConfigProfile{
+						Command: "mpirun -np 1 dcp src dest",
+					}
+
+					cmd, hostfile, err := buildDMCommand(profile, hosts, &dm)
+					Expect(err).ToNot(HaveOccurred())
+					Expect(len(hostfile)).Should(Equal(0))
+					Expect(cmd).ToNot(BeEmpty())
+					info, err := os.Stat(hostfile)
+					Expect(err).To(HaveOccurred())
+					Expect(info).To(BeNil())
+				})
+
+			})
		})
	})

-	Context("when the data movement operation has a set uid and gid", func() {
-		expectedGid := uint32(1000)
-		expectedUid := uint32(2000)
-
-		
BeforeEach(func() { - cm.Data[configMapKeyCmd] = "" - dm.Spec.GroupId = expectedGid - dm.Spec.UserId = expectedUid - }) - It("should use those in the data movement command", func() { - Eventually(func(g Gomega) string { - cmd := "" - g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm)).To(Succeed()) - if dm.Status.CommandStatus != nil { - cmd = dm.Status.CommandStatus.Command - } - return cmd - }).Should(Equal(fmt.Sprintf( - "mpirun --allow-run-as-root -np 1 --host localhost dcp --progress 1 --uid %d --gid %d %s %s", - expectedUid, expectedGid, srcPath, destPath))) - }) - }) }) diff --git a/daemons/compute/server/servers/server_default.go b/daemons/compute/server/servers/server_default.go index d74492de..a6b0b21f 100644 --- a/daemons/compute/server/servers/server_default.go +++ b/daemons/compute/server/servers/server_default.go @@ -285,16 +285,19 @@ func (s *defaultServer) Create(ctx context.Context, req *pb.DataMovementCreateRe if err != nil { return &pb.DataMovementCreateResponse{ Status: pb.DataMovementCreateResponse_FAILED, - Message: err.Error(), + Message: "Error finding compute mount info: " + err.Error(), }, nil } var dm *nnfv1alpha1.NnfDataMovement + dmFunc := "" switch computeMountInfo.Type { case "lustre": dm, err = s.createNnfDataMovement(ctx, req, computeMountInfo, computeClientMount) + dmFunc = "createNnfDataMovement()" case "gfs2": dm, err = s.createNnfNodeDataMovement(ctx, req, computeMountInfo) + dmFunc = "createNnfNodeDataMovement()" default: return &pb.DataMovementCreateResponse{ Status: pb.DataMovementCreateResponse_INVALID, @@ -305,14 +308,17 @@ func (s *defaultServer) Create(ctx context.Context, req *pb.DataMovementCreateRe if err != nil { return &pb.DataMovementCreateResponse{ Status: pb.DataMovementCreateResponse_FAILED, - Message: err.Error(), + Message: fmt.Sprintf("Error during %s: %s", dmFunc, err), }, nil } // Authentication userId, groupId, err := auth.GetAuthInfo(ctx) if err != nil { - return nil, err + return &pb.DataMovementCreateResponse{ + Status: pb.DataMovementCreateResponse_FAILED, + Message: "Error authenticating: " + err.Error(), + }, nil } dm.Spec.UserId = userId @@ -331,12 +337,12 @@ func (s *defaultServer) Create(ctx context.Context, req *pb.DataMovementCreateRe // Label the NnfDataMovement with a teardown state of "post_run" so the NNF workflow // controller can identify compute initiated data movements. - nnfv1alpha1.AddDataMovementTeardownStateLabel(dm, dwsv1alpha1.StatePostRun.String()) + nnfv1alpha1.AddDataMovementTeardownStateLabel(dm, dwsv1alpha1.StatePostRun) if err := s.client.Create(ctx, dm, &client.CreateOptions{}); err != nil { return &pb.DataMovementCreateResponse{ Status: pb.DataMovementCreateResponse_FAILED, - Message: err.Error(), + Message: "Error creating DataMovement: " + err.Error(), }, nil } diff --git a/deploy.sh b/deploy.sh index 2c770dbf..8475660f 100755 --- a/deploy.sh +++ b/deploy.sh @@ -39,7 +39,6 @@ deploy) $(cd config/manager && $KUSTOMIZE edit set image controller="$IMG") $KUSTOMIZE build config/default | kubectl apply -f - || true - $KUSTOMIZE build config/dm_config | kubectl create -f - || true # Sometimes the deployment of the DataMovementManager occurs to quickly for k8s to digest the CRD # Retry the deployment if this is the case. It seems to be fast enough where we can just @@ -56,7 +55,6 @@ undeploy) # removed, so the delete will always fail. We ignore all errors at our # own risk. 
$KUSTOMIZE build config/default | kubectl delete --ignore-not-found -f - - $KUSTOMIZE build config/dm_config | kubectl delete --ignore-not-found -f - ;; *) usage diff --git a/go.mod b/go.mod index 242110d2..6b8acb40 100644 --- a/go.mod +++ b/go.mod @@ -3,9 +3,9 @@ module github.com/NearNodeFlash/nnf-dm go 1.19 require ( - github.com/HewlettPackard/dws v0.0.0-20221027172242-11a787dffb4c + github.com/HewlettPackard/dws v0.0.0-20230207214424-016335e15075 github.com/NearNodeFlash/lustre-fs-operator v0.0.0-20221101145649-697b21833988 - github.com/NearNodeFlash/nnf-sos v0.0.0-20221012202709-192706b48299 + github.com/NearNodeFlash/nnf-sos v0.0.0-20230222155613-59e3bc22638a github.com/onsi/ginkgo/v2 v2.4.0 github.com/onsi/gomega v1.23.0 github.com/prometheus/client_golang v1.13.0 @@ -29,7 +29,7 @@ require ( github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/NearNodeFlash/nnf-ec v0.0.0-20220920142937-1080197e523b // indirect + github.com/NearNodeFlash/nnf-ec v0.0.0-20230221180225-1dce5b1f270f // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -84,7 +84,28 @@ require ( require ( cloud.google.com/go/compute v1.10.0 // indirect + github.com/HewlettPackard/structex v1.0.2 // indirect + github.com/cespare/xxhash v1.1.0 // indirect + github.com/dgraph-io/badger/v3 v3.2103.1 // indirect + github.com/dgraph-io/ristretto v0.1.0 // indirect + github.com/dustin/go-humanize v1.0.0 // indirect github.com/emicklei/go-restful/v3 v3.9.0 // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect github.com/golang-jwt/jwt/v4 v4.4.2 // indirect + github.com/golang/glog v1.0.0 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/flatbuffers v2.0.0+incompatible // indirect + github.com/gorilla/mux v1.8.0 // indirect + github.com/klauspost/compress v1.13.5 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect + github.com/moby/sys/mountinfo v0.6.2 // indirect + github.com/pkg/term v1.1.0 // indirect + github.com/rs/cors v1.8.0 // indirect + github.com/senseyeio/duration v0.0.0-20180430131211-7c2a214ada46 // indirect + github.com/sigurn/crc8 v0.0.0-20160107002456-e55481d6f45c // indirect + github.com/sigurn/utils v0.0.0-20190728110027-e1fefb11a144 // indirect + github.com/sirupsen/logrus v1.9.0 // indirect + go.chromium.org/luci v0.0.0-20210915061045-7722a2154c29 // indirect + go.opencensus.io v0.23.0 // indirect + k8s.io/mount-utils v0.25.2 // indirect ) diff --git a/go.sum b/go.sum index e7df4ac8..b6342ea3 100644 --- a/go.sum +++ b/go.sum @@ -88,6 +88,9 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/HewlettPackard/dws v0.0.0-20221027172242-11a787dffb4c h1:E8x4Ovw8gYYIKJ9qMjf6Qf4toNG9OW6VGtbkPFmIFXY= github.com/HewlettPackard/dws v0.0.0-20221027172242-11a787dffb4c/go.mod h1:OxTK/qLMVBktON5wHI6zGuKEsoT8iLpPtUX+y02/m10= +github.com/HewlettPackard/dws v0.0.0-20230207214424-016335e15075 h1:847PiLepVQ10SoPQEc3OKdxWg6LXt70WXcCQj7ih3Bc= +github.com/HewlettPackard/dws v0.0.0-20230207214424-016335e15075/go.mod h1:OxTK/qLMVBktON5wHI6zGuKEsoT8iLpPtUX+y02/m10= +github.com/HewlettPackard/structex v1.0.2 h1:p2EH/p6zvUd5fSa0onudAUvLWdKsvQSRP0jGKeblA5E= github.com/HewlettPackard/structex v1.0.2/go.mod 
h1:3frC4RY/cPsP/4+N8rkxsNAGlQwHV+zDC7qvrN+N+rE= github.com/Masterminds/squirrel v1.5.0/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= @@ -95,8 +98,16 @@ github.com/NearNodeFlash/lustre-fs-operator v0.0.0-20221101145649-697b21833988 h github.com/NearNodeFlash/lustre-fs-operator v0.0.0-20221101145649-697b21833988/go.mod h1:j01cfKrQnsszCBkNGuwymvcAi1Rt7i5ZvzjKwUHBjyA= github.com/NearNodeFlash/nnf-ec v0.0.0-20220920142937-1080197e523b h1:+4x/yTuM6j1KzEmKZrBx9LmZEARJ/zc5mH+38yZnU3c= github.com/NearNodeFlash/nnf-ec v0.0.0-20220920142937-1080197e523b/go.mod h1:s6Gp5d88rhMFcqrkF/Ds8D+n6GMAv9iw004YhAL3Neo= +github.com/NearNodeFlash/nnf-ec v0.0.0-20230221180225-1dce5b1f270f h1:Dr/JRcUA0N8ZKbUnXx/J6pHIPfVT7rrzFFFKyelRe8E= +github.com/NearNodeFlash/nnf-ec v0.0.0-20230221180225-1dce5b1f270f/go.mod h1:s6Gp5d88rhMFcqrkF/Ds8D+n6GMAv9iw004YhAL3Neo= github.com/NearNodeFlash/nnf-sos v0.0.0-20221012202709-192706b48299 h1:y7Isj6LA6CFI/JPyeXOwt7ii09PTiVmkx99N09Zwjg4= github.com/NearNodeFlash/nnf-sos v0.0.0-20221012202709-192706b48299/go.mod h1:ytCSNDNgPEqNndL042/FeozWIVUixWOD/mmPqhIDYDY= +github.com/NearNodeFlash/nnf-sos v0.0.0-20230222155613-59e3bc22638a h1:4GTC2q72uJow9C5JpAfh7xjGW1HwlFdEvt++ma51Kz4= +github.com/NearNodeFlash/nnf-sos v0.0.0-20230222155613-59e3bc22638a/go.mod h1:VWtvXpr3ndLhFnxib7LNvHQb1xPp13vj6Kn0NGslNv0= +github.com/NearNodeFlash/nnf-sos v0.0.0-20230227170353-585d71d73f83 h1:mfi2xf+BOnUHWvDna9VpMWEm1ZsLxX+lP3s0evj1cAg= +github.com/NearNodeFlash/nnf-sos v0.0.0-20230227170353-585d71d73f83/go.mod h1:VWtvXpr3ndLhFnxib7LNvHQb1xPp13vj6Kn0NGslNv0= +github.com/NearNodeFlash/nnf-sos v0.0.0-20230227230025-fa4f33456559 h1:UQwhTzdjCFFqj3S0ljraIJZ9oVB8VcHrJhy7H9M75I8= +github.com/NearNodeFlash/nnf-sos v0.0.0-20230227230025-fa4f33456559/go.mod h1:VWtvXpr3ndLhFnxib7LNvHQb1xPp13vj6Kn0NGslNv0= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/VividCortex/mysqlerr v1.0.0/go.mod h1:xERx8E4tBhLvpjzdUyQiSfUxeMcATEQrflDAfXsqcAE= github.com/alecthomas/kong v0.2.17/go.mod h1:ka3VZ8GZNPXv9Ov+j4YNLkI8mTuhXyr/0ktSlqIydQQ= @@ -128,6 +139,7 @@ github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJm github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= @@ -155,12 +167,15 @@ github.com/danjacques/gofslock v0.0.0-20200623023034-5d0bd0fa6ef0/go.mod h1:DC3J github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgraph-io/badger/v3 v3.2103.1 h1:zaX53IRg7ycxVlkd5pYdCeFp1FynD6qBGQoQql3R3Hk= github.com/dgraph-io/badger/v3 v3.2103.1/go.mod h1:dULbq6ehJ5K0cGW/1TQ9iSfUk0gbSiToDWmWmTsJ53E= +github.com/dgraph-io/ristretto v0.1.0 
h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= @@ -228,6 +243,7 @@ github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -264,12 +280,14 @@ github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gomodule/redigo v1.8.5/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/flatbuffers v1.12.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v2.0.0+incompatible h1:dicJ2oXwypfwUGnB2/TYWYEKiuk9eYQlQO/AnOHl5mI= github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= @@ -325,6 +343,7 @@ github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pf github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20210901121439-eee08aaf2717/go.mod h1:0RnbP5ioI0nqRf3R9iK3iQaUJgsn0htlZEHCMn8FSfw= github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod 
h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -393,6 +412,7 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.5 h1:9O69jUPDcsT9fEm74W92rZL9FQY7rCdaXVneq+yyzl4= github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -427,6 +447,7 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-tty v0.0.3/go.mod h1:ihxohKRERHTVzN+aSVRwACLCeqIoZAWpoICkkvrWyR0= @@ -449,6 +470,8 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/sys/mountinfo v0.6.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= +github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -496,6 +519,7 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/term v1.1.0 h1:xIAAdCMh3QIAy+5FrE8Ad8XoDhEU4ufwbaSozViP9kk= github.com/pkg/term v1.1.0/go.mod h1:E25nymQcrSllhX42Ok8MRm1+hyBdHY0dCeiKZ9jpNGw= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -538,6 +562,7 @@ 
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/rs/cors v1.8.0 h1:P2KMzcFwrPoSjkF1WLRPsp3UMLyql8L4v9hQpVeK5so= github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -546,19 +571,24 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/senseyeio/duration v0.0.0-20180430131211-7c2a214ada46 h1:Dz0HrI1AtNSGCE8LXLLqoZU4iuOJXPWndenCsZfstA8= github.com/senseyeio/duration v0.0.0-20180430131211-7c2a214ada46/go.mod h1:is8FVkzSi7PYLWEXT5MgWhglFsyyiW8ffxAoJqfuFZo= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shurcooL/go v0.0.0-20200502201357-93f07166e636/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= +github.com/sigurn/crc8 v0.0.0-20160107002456-e55481d6f45c h1:hk0Jigjfq59yDMgd6bzi22Das5tyxU0CtOkh7a9io84= github.com/sigurn/crc8 v0.0.0-20160107002456-e55481d6f45c/go.mod h1:cyrWuItcOVIGX6fBZ/G00z4ykprWM7hH58fSavNkjRg= +github.com/sigurn/utils v0.0.0-20190728110027-e1fefb11a144 h1:ccb8W1+mYuZvlpn/mJUMAbsFHTMCpcJBS78AsBQxNcY= github.com/sigurn/utils v0.0.0-20190728110027-e1fefb11a144/go.mod h1:VRI4lXkrUH5Cygl6mbG1BRUfMMoT2o8BkrtBDUAm+GU= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= @@ -607,6 +637,7 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= 
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.chromium.org/luci v0.0.0-20210915061045-7722a2154c29 h1:TgvWIQO0I0E7pKVLq/ARDhT2p2kHGKLtZhAN0EtUxiI= go.chromium.org/luci v0.0.0-20210915061045-7722a2154c29/go.mod h1:gk1qa1CBZAdya1PqugRJjkwk8LIymAoB5wf5UA7NrFk= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -616,6 +647,7 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.starlark.net v0.0.0-20210223155950-e043a3d3c984/go.mod h1:t3mmBBPzAVvK0L0n1drDmrQsJ8FoIx4INCqVMTr/Zo0= @@ -856,6 +888,8 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1160,6 +1194,8 @@ k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20220928191237-829ce0c27909 h1:q/70bz7C1/LGuQu/JBX7Fpi55CwcCts/wbvlehe0RRo= k8s.io/kube-openapi v0.0.0-20220928191237-829ce0c27909/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= k8s.io/mount-utils v0.25.0/go.mod h1:WTYq8Ev/JrnkqK2h1jFUnC8qWGuqzMb9XDC+Lu3WNU0= +k8s.io/mount-utils v0.25.2 h1:ZNIHSaPaaPqFqtZzaTbgH3oHKoWZHoNBnTKafQRj6A4= +k8s.io/mount-utils v0.25.2/go.mod h1:WTYq8Ev/JrnkqK2h1jFUnC8qWGuqzMb9XDC+Lu3WNU0= k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220922133306-665eaaec4324 h1:i+xdFemcSNuJvIfBlaYuXgRondKxK4z4prVPKzEaelI= k8s.io/utils v0.0.0-20220922133306-665eaaec4324/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= diff --git a/main.go b/main.go index 087be6b5..4016fbf8 100644 --- a/main.go +++ b/main.go @@ -199,7 +199,6 @@ type nodeController struct { func (*nodeController) GetType() string { return NodeController } func (*nodeController) SetOptions(opts *ctrl.Options) { - // Use the default namespace to access the nnf-dm-config ConfigMap - namespaces := []string{corev1.NamespaceDefault, os.Getenv("NNF_NODE_NAME")} + namespaces := []string{dmv1alpha1.DataMovementNamespace, os.Getenv("NNF_NODE_NAME")} opts.NewCache = cache.MultiNamespacedCacheBuilder(namespaces) } diff --git a/vendor/github.com/HewlettPackard/dws/api/v1alpha1/clientmount_types.go 
b/vendor/github.com/HewlettPackard/dws/api/v1alpha1/clientmount_types.go index 37eae4a4..9a160005 100644 --- a/vendor/github.com/HewlettPackard/dws/api/v1alpha1/clientmount_types.go +++ b/vendor/github.com/HewlettPackard/dws/api/v1alpha1/clientmount_types.go @@ -117,6 +117,15 @@ type ClientMountInfo struct { // Client path for mount target MountPath string `json:"mountPath"` + // UserID to set for the mount + UserID uint32 `json:"userID,omitempty"` + + // GroupID to set for the mount + GroupID uint32 `json:"groupID,omitempty"` + + // SetPermissions will set UserID and GroupID on the mount if true + SetPermissions bool `json:"setPermissions"` + // Options for the file system mount Options string `json:"options"` diff --git a/vendor/github.com/HewlettPackard/dws/api/v1alpha1/directivebreakdown_types.go b/vendor/github.com/HewlettPackard/dws/api/v1alpha1/directivebreakdown_types.go index 7d565a9b..b7d77a1d 100644 --- a/vendor/github.com/HewlettPackard/dws/api/v1alpha1/directivebreakdown_types.go +++ b/vendor/github.com/HewlettPackard/dws/api/v1alpha1/directivebreakdown_types.go @@ -29,6 +29,14 @@ import ( // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. +type AllocationStrategy string + +const ( + AllocatePerCompute AllocationStrategy = "AllocatePerCompute" + AllocateAcrossServers AllocationStrategy = "AllocateAcrossServers" + AllocateSingleServer AllocationStrategy = "AllocateSingleServer" +) + const ( // DirectiveLifetimeJob specifies storage allocated for the lifetime of the job DirectiveLifetimeJob = "job" @@ -64,8 +72,8 @@ type AllocationSetConstraints struct { // StorageAllocationSet defines the details of an allocation set type StorageAllocationSet struct { // AllocationStrategy specifies the way to determine the number of allocations of the MinimumCapacity required for this AllocationSet. - // +kubebuilder:validation:Enum=AllocatePerCompute;AllocateAcrossServers;AllocateSingleServer;AssignPerCompute;AssignAcrossServers; - AllocationStrategy string `json:"allocationStrategy"` + // +kubebuilder:validation:Enum=AllocatePerCompute;AllocateAcrossServers;AllocateSingleServer + AllocationStrategy AllocationStrategy `json:"allocationStrategy"` // MinimumCapacity is the minumum number of bytes required to meet the needs of the filesystem that // will use the storage. diff --git a/vendor/github.com/HewlettPackard/dws/api/v1alpha1/persistentstorageinstance_types.go b/vendor/github.com/HewlettPackard/dws/api/v1alpha1/persistentstorageinstance_types.go index 901cf8fe..3263908d 100644 --- a/vendor/github.com/HewlettPackard/dws/api/v1alpha1/persistentstorageinstance_types.go +++ b/vendor/github.com/HewlettPackard/dws/api/v1alpha1/persistentstorageinstance_types.go @@ -41,15 +41,15 @@ type PersistentStorageInstanceState string // State enumerations const ( // The PSI resource exists in k8s, but the storage and filesystem that it represents has not been created yet - PSIStateCreating PersistentStorageInstanceState = "creating" + PSIStateCreating PersistentStorageInstanceState = "Creating" // The storage and filesystem represented by the PSI exists and is ready for use - PSIStateActive PersistentStorageInstanceState = "active" + PSIStateActive PersistentStorageInstanceState = "Active" // A #DW destroy_persistent directive has been issued in a workflow. // Once all other workflows with persistent_dw reservations on the PSI complete, the PSI will be destroyed. 
// New #DW persistent_dw requests after the PSI enters the 'destroying' state will fail. - PSIStateDestroying PersistentStorageInstanceState = "destroying" + PSIStateDestroying PersistentStorageInstanceState = "Destroying" ) // PersistentStorageInstanceSpec defines the desired state of PersistentStorageInstance @@ -68,7 +68,7 @@ type PersistentStorageInstanceSpec struct { UserID uint32 `json:"userID"` // Desired state of the PersistentStorageInstance - // +kubebuilder:validation:Enum:=active;destroying + // +kubebuilder:validation:Enum:=Active;Destroying State PersistentStorageInstanceState `json:"state"` // List of consumers using this persistent storage @@ -81,7 +81,7 @@ type PersistentStorageInstanceStatus struct { Servers corev1.ObjectReference `json:"servers,omitempty"` // Current state of the PersistentStorageInstance - // +kubebuilder:validation:Enum:=creating;active;destroying + // +kubebuilder:validation:Enum:=Creating;Active;Destroying State PersistentStorageInstanceState `json:"state"` // Error information diff --git a/vendor/github.com/HewlettPackard/dws/api/v1alpha1/resource.go b/vendor/github.com/HewlettPackard/dws/api/v1alpha1/resource.go new file mode 100644 index 00000000..d9266241 --- /dev/null +++ b/vendor/github.com/HewlettPackard/dws/api/v1alpha1/resource.go @@ -0,0 +1,71 @@ +/* + * Copyright 2023 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha1 + +// ResourceState is the enumeration of the state of a DWS resource +// +kubebuilder:validation:Enum:=Enabled;Disabled +type ResourceState string + +const ( + // NOTE: Any new enumeration values must update the related kubebuilder validation + + // Enabled means the resource shall be enabled. + EnabledState ResourceState = "Enabled" + + // Disabled means the resource shall be disabled. + DisabledState ResourceState = "Disabled" +) + +// ResourceStatus is the enumeration of the status of a DWS resource +// +kubebuilder:validation:Enum:=Starting;Ready;Disabled;NotPresent;Offline;Failed;Degraded;Unknown +type ResourceStatus string + +const ( + // NOTE: Any new enumeration values must update the related kubebuilder validation + + // Starting means the resource is currently starting prior to becoming ready. + StartingStatus ResourceStatus = "Starting" + + // Ready means the resource is fully operational and ready for use. + ReadyStatus ResourceStatus = "Ready" + + // Disabled means the resource is present but disabled by an administrator or external + // user. + DisabledStatus ResourceStatus = "Disabled" + + // NotPresent means the resource is not present within the system, likely because + // it is missing or powered down. This differs from the Offline state in that the + // resource is not known to exist. + NotPresentStatus ResourceStatus = "NotPresent" + + // Offline means the resource is offline and cannot be communicated with. 
This differs
+	// from the NotPresent state in that the resource is known to exist.
+	OfflineStatus ResourceStatus = "Offline"
+
+	// Failed means the resource has failed during startup or execution.
+	FailedStatus ResourceStatus = "Failed"
+
+	// Degraded means the resource is ready but operating in a degraded state. Certain
+	// recovery actions may alleviate a degraded status.
+	DegradedStatus ResourceStatus = "Degraded"
+
+	// Unknown means the resource status is unknown.
+	UnknownStatus ResourceStatus = "Unknown"
+)
diff --git a/vendor/github.com/HewlettPackard/dws/api/v1alpha1/storage_types.go b/vendor/github.com/HewlettPackard/dws/api/v1alpha1/storage_types.go
index e9ec9f53..b0c36d7c 100644
--- a/vendor/github.com/HewlettPackard/dws/api/v1alpha1/storage_types.go
+++ b/vendor/github.com/HewlettPackard/dws/api/v1alpha1/storage_types.go
@@ -20,6 +20,7 @@ package v1alpha1

 import (
+	"github.com/HewlettPackard/dws/utils/updater"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

@@ -27,13 +28,15 @@ const (
 	// StorageTypeLabel is the label key used for tagging Storage resources
 	// with a driver specific label. For example: dws.cray.hpe.com/storage=Rabbit
 	StorageTypeLabel = "dws.cray.hpe.com/storage"
-
-	// StoragePoolLabelPrefix is the prefix for the label key used for tagging
-	// Storage resources with a storage pool label.
-	// For example: dws.cray.hpe.com/storage-pool-default=true
-	StoragePoolLabelPrefix = "dws.cray.hpe.com/storage-pool-"
 )

+// StorageSpec defines the desired specifications of Storage resource
+type StorageSpec struct {
+	// State describes the desired state of the Storage resource.
+	// +kubebuilder:default:=Enabled
+	State ResourceState `json:"state,omitempty"`
+}
+
 // StorageDevice contains the details of the storage hardware
 type StorageDevice struct {
 	// Model is the manufacturer information about the device
@@ -57,8 +60,7 @@ type StorageDevice struct {
 	WearLevel *int64 `json:"wearLevel,omitempty"`

 	// Status of the individual device
-	// +kubebuilder:validation:Enum=Starting;Ready;Disabled;NotPresent;Offline;Failed
-	Status string `json:"status,omitempty"`
+	Status ResourceStatus `json:"status,omitempty"`
 }

 // Node provides the status of either a compute or a server
@@ -67,30 +69,41 @@ type Node struct {
 	Name string `json:"name,omitempty"`

 	// Status of the node
-	// +kubebuilder:validation:Enum=Starting;Ready;Disabled;NotPresent;Offline;Failed
-	Status string `json:"status,omitempty"`
+	Status ResourceStatus `json:"status,omitempty"`
 }

+// StorageAccessProtocol is the enumeration of supported protocols.
+// +kubebuilder:validation:Enum:=PCIe
+type StorageAccessProtocol string
+
+const (
+	PCIe StorageAccessProtocol = "PCIe"
+)
+
 // StorageAccess contains nodes and the protocol that may access the storage
 type StorageAccess struct {
 	// Protocol is the method that this storage can be accessed
-	// +kubebuilder:validation:Enum=PCIe
-	Protocol string `json:"protocol,omitempty"`
+	Protocol StorageAccessProtocol `json:"protocol,omitempty"`

-	// Servers is the list of non-compute nodes that have access to
-	// the storage
+	// Servers is the list of non-compute nodes that have access to the storage
 	Servers []Node `json:"servers,omitempty"`

-	// Computes is the list of compute nodes that have access to
-	// the storage
+	// Computes is the list of compute nodes that have access to the storage
 	Computes []Node `json:"computes,omitempty"`
 }

+// StorageType is the enumeration of storage types.
+// +kubebuilder:validation:Enum:=NVMe
+type StorageType string
+
+const (
+	NVMe StorageType = "NVMe"
+)
+
 // StorageData contains the data about the storage
-type StorageData struct {
+type StorageStatus struct {
 	// Type describes what type of storage this is
-	// +kubebuilder:validation:Enum=NVMe
-	Type string `json:"type,omitempty"`
+	Type StorageType `json:"type,omitempty"`

 	// Devices is the list of physical devices that make up this storage
 	Devices []StorageDevice `json:"devices,omitempty"`
@@ -105,17 +118,33 @@ type StorageData struct {
 	Capacity int64 `json:"capacity"`

 	// Status is the overall status of the storage
-	// +kubebuilder:validation:Enum=Starting;Ready;Disabled;NotPresent;Offline;Failed
-	Status string `json:"status,omitempty"`
+	Status ResourceStatus `json:"status,omitempty"`
+
+	// Reboot Required is true if the node requires a reboot and false otherwise. A reboot may be
+	// necessary to recover from certain hardware failures or high-availability clustering events.
+	RebootRequired bool `json:"rebootRequired,omitempty"`
+
+	// Message provides additional details on the current status of the resource
+	Message string `json:"message,omitempty"`
 }

+//+kubebuilder:object:root=true
+//+kubebuilder:subresource:status
+//+kubebuilder:printcolumn:name="State",type="string",JSONPath=".spec.state",description="State of the storage resource"
+//+kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.status",description="Status of the storage resource"
+//+kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
+
 // Storage is the Schema for the storages API
-// +kubebuilder:object:root=true
 type Storage struct {
 	metav1.TypeMeta   `json:",inline"`
 	metav1.ObjectMeta `json:"metadata,omitempty"`

-	Data StorageData `json:"data,omitempty"`
+	Spec StorageSpec `json:"spec"`
+	Status StorageStatus `json:"status,omitempty"`
+}
+
+func (s *Storage) GetStatus() updater.Status[*StorageStatus] {
+	return &s.Status
 }

 //+kubebuilder:object:root=true
diff --git a/vendor/github.com/HewlettPackard/dws/api/v1alpha1/storagepool_types.go b/vendor/github.com/HewlettPackard/dws/api/v1alpha1/storagepool_types.go
deleted file mode 100644
index f04d3f19..00000000
--- a/vendor/github.com/HewlettPackard/dws/api/v1alpha1/storagepool_types.go
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright 2021, 2022 Hewlett Packard Enterprise Development LP
- * Other additional copyright holders may be indicated within.
- *
- * The entirety of this work is licensed under the Apache License,
- * Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License.
- *
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// StoragePoolSpec defines the desired state of StoragePool -type StoragePoolSpec struct { - PoolID string `json:"poolID"` - Units string `json:"units"` - Granularity string `json:"granularity"` - Quantity int `json:"quantity"` - Free int `json:"free"` -} - -// StoragePoolStatus defines the observed state of StoragePool -type StoragePoolStatus struct { - State string `json:"state"` -} - -//+kubebuilder:object:root=true -//+kubebuilder:subresource:status - -// StoragePool is the Schema for the storagepools API -type StoragePool struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec StoragePoolSpec `json:"spec,omitempty"` - Status StoragePoolStatus `json:"status,omitempty"` -} - -//+kubebuilder:object:root=true - -// StoragePoolList contains a list of StoragePool -type StoragePoolList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []StoragePool `json:"items"` -} - -func init() { - SchemeBuilder.Register(&StoragePool{}, &StoragePoolList{}) -} diff --git a/vendor/github.com/HewlettPackard/dws/api/v1alpha1/systemconfiguration_types.go b/vendor/github.com/HewlettPackard/dws/api/v1alpha1/systemconfiguration_types.go index b82b3a37..09e7c53f 100644 --- a/vendor/github.com/HewlettPackard/dws/api/v1alpha1/systemconfiguration_types.go +++ b/vendor/github.com/HewlettPackard/dws/api/v1alpha1/systemconfiguration_types.go @@ -42,7 +42,6 @@ type SystemConfigurationComputeNodeReference struct { // SystemConfigurationStorageNode describes a storage node in the system type SystemConfigurationStorageNode struct { // Type is the type of server - // +kubebuilder:validation:Enum=Rabbit Type string `json:"type"` // Name of the server node diff --git a/vendor/github.com/HewlettPackard/dws/api/v1alpha1/workflow_types.go b/vendor/github.com/HewlettPackard/dws/api/v1alpha1/workflow_types.go index 5d1dbb52..0462bf89 100644 --- a/vendor/github.com/HewlettPackard/dws/api/v1alpha1/workflow_types.go +++ b/vendor/github.com/HewlettPackard/dws/api/v1alpha1/workflow_types.go @@ -20,8 +20,6 @@ package v1alpha1 import ( - "fmt" - "github.com/HewlettPackard/dws/utils/updater" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -35,28 +33,60 @@ const ( WorkflowNamespaceLabel = "dws.cray.hpe.com/workflow.namespace" ) -// WorkflowState is the enumeration of The state of the workflow -type WorkflowState int +// WorkflowState is the enumeration of the state of the workflow +// +kubebuilder:validation:Enum:=Proposal;Setup;DataIn;PreRun;PostRun;DataOut;Teardown +type WorkflowState string -// State enumerations +// WorkflowState values const ( - StateProposal WorkflowState = iota - StateSetup - StateDataIn - StatePreRun - StatePostRun - StateDataOut - StateTeardown + StateProposal WorkflowState = "Proposal" + StateSetup WorkflowState = "Setup" + StateDataIn WorkflowState = "DataIn" + StatePreRun WorkflowState = "PreRun" + StatePostRun WorkflowState = "PostRun" + StateDataOut WorkflowState = "DataOut" + StateTeardown WorkflowState = "Teardown" ) -var workflowStrings = [...]string{ - "proposal", - "setup", - "data_in", - "pre_run", - "post_run", - "data_out", - "teardown", +// Next reports the next state after state s +func (s WorkflowState) next() WorkflowState { + switch s { + case "": + return StateProposal + case StateProposal: + return StateSetup + case StateSetup: + return StateDataIn + case StateDataIn: + return 
StatePreRun + case StatePreRun: + return StatePostRun + case StatePostRun: + return StateDataOut + case StateDataOut: + return StateTeardown + } + + panic(s) +} + +// Last reports whether the state s is the last state +func (s WorkflowState) last() bool { + return s == StateTeardown +} + +// After reports whether the state s is after t +func (s WorkflowState) after(t WorkflowState) bool { + + for !t.last() { + next := t.next() + if s == next { + return true + } + t = next + } + + return false } // Strings associated with workflow statuses @@ -69,30 +99,14 @@ const ( StatusDriverWait = "DriverWait" ) -func (s WorkflowState) String() string { - return workflowStrings[s] -} - -// GetWorkflowState returns the WorkflowState constant that matches the -// string value passed in -func GetWorkflowState(state string) (WorkflowState, error) { - for i := StateProposal; i <= StateTeardown; i++ { - if i.String() == state { - return i, nil - } - } - - return StateProposal, fmt.Errorf("invalid workflow state '%s'", state) -} - // WorkflowSpec defines the desired state of Workflow type WorkflowSpec struct { // Desired state for the workflow to be in. Unless progressing to the teardown state, // this can only be set to the next state when the current desired state has been achieved. - // +kubebuilder:validation:Enum=proposal;setup;data_in;pre_run;post_run;data_out;teardown - DesiredState string `json:"desiredState"` - WLMID string `json:"wlmID"` - JobID int `json:"jobID"` + DesiredState WorkflowState `json:"desiredState"` + + WLMID string `json:"wlmID"` + JobID int `json:"jobID"` // UserID specifies the user ID for the workflow. The User ID is used by the various states // in the workflow to ensure the user has permissions to perform certain actions. Used in @@ -116,12 +130,14 @@ type WorkflowSpec struct { // WorkflowDriverStatus defines the status information provided by integration drivers. type WorkflowDriverStatus struct { - DriverID string `json:"driverID"` - TaskID string `json:"taskID"` - DWDIndex int `json:"dwdIndex"` - WatchState string `json:"watchState"` - LastHB int64 `json:"lastHB"` - Completed bool `json:"completed"` + DriverID string `json:"driverID"` + TaskID string `json:"taskID"` + DWDIndex int `json:"dwdIndex"` + + WatchState WorkflowState `json:"watchState"` + + LastHB int64 `json:"lastHB"` + Completed bool `json:"completed"` // User readable reason. // For the CDS driver, this could be the state of the underlying @@ -129,6 +145,7 @@ type WorkflowDriverStatus struct { // +kubebuilder:validation:Enum=Pending;Queued;Running;Completed;Error;DriverWait Status string `json:"status,omitempty"` + // Message provides additional details on the current status of the resource Message string `json:"message,omitempty"` // Driver error string. This is not rolled up into the workflow's @@ -143,8 +160,7 @@ type WorkflowDriverStatus struct { type WorkflowStatus struct { // The state the resource is currently transitioning to. // Updated by the controller once started. - // +kubebuilder:default:=proposal - State string `json:"state"` + State WorkflowState `json:"state,omitempty"` // Ready can be 'True', 'False' // Indicates whether State has been reached. 
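
The hunks above replace the old integer-based WorkflowState enumeration (and its String()/GetWorkflowState() helpers) with typed string constants plus the unexported next(), last(), and after() helpers. A minimal sketch of how these helpers compose, assuming package-internal access (validTransition is a hypothetical illustration, not part of this patch):

```go
// validTransition sketches how a caller inside the v1alpha1 package might
// gate a requested state change using the helpers defined above.
// Hypothetical; not part of this patch.
func validTransition(current, desired WorkflowState) bool {
	// Progressing to Teardown is allowed from any state.
	if desired == StateTeardown {
		return true
	}

	// next() panics when called on the final state, so guard it with last().
	if current.last() {
		return false
	}

	// Otherwise only the immediate successor is a legal transition.
	return current.next() == desired
}
```

ValidateUpdate() in workflow_webhook.go below performs the same ordering checks using after() and next().
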
@@ -152,7 +168,9 @@ type WorkflowStatus struct { // User readable reason and status message // +kubebuilder:validation:Enum=Completed;DriverWait;Error - Status string `json:"status,omitempty"` + Status string `json:"status,omitempty"` + + // Message provides additional details on the current status of the resource Message string `json:"message,omitempty"` // Set of DW environment variable settings for WLM to apply to the job. diff --git a/vendor/github.com/HewlettPackard/dws/api/v1alpha1/workflow_webhook.go b/vendor/github.com/HewlettPackard/dws/api/v1alpha1/workflow_webhook.go index 063696bf..69269e3a 100644 --- a/vendor/github.com/HewlettPackard/dws/api/v1alpha1/workflow_webhook.go +++ b/vendor/github.com/HewlettPackard/dws/api/v1alpha1/workflow_webhook.go @@ -27,6 +27,7 @@ import ( "strings" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" logf "sigs.k8s.io/controller-runtime/pkg/log" @@ -50,7 +51,7 @@ func (w *Workflow) SetupWebhookWithManager(mgr ctrl.Manager) error { Complete() } -//+kubebuilder:webhook:path=/mutate-dws-cray-hpe-com-v1alpha1-workflow,mutating=true,failurePolicy=fail,sideEffects=None,groups=dws.cray.hpe.com,resources=workflows,verbs=create;update,versions=v1alpha1,name=mworkflow.kb.io,admissionReviewVersions={v1,v1beta1} +//+kubebuilder:webhook:path=/mutate-dws-cray-hpe-com-v1alpha1-workflow,mutating=true,failurePolicy=fail,sideEffects=None,groups=dws.cray.hpe.com,resources=workflows,verbs=create,versions=v1alpha1,name=mworkflow.kb.io,admissionReviewVersions={v1,v1beta1} var _ webhook.Defaulter = &Workflow{} @@ -76,14 +77,17 @@ var _ webhook.Validator = &Workflow{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type func (w *Workflow) ValidateCreate() error { - if w.Spec.DesiredState != StateProposal.String() { - return fmt.Errorf("desired state must start in %s", StateProposal.String()) + specPath := field.NewPath("Spec") + + if w.Spec.DesiredState != StateProposal { + s := fmt.Sprintf("desired state must start in %s", StateProposal) + return field.Invalid(specPath.Child("DesiredState"), w.Spec.DesiredState, s) } if w.Spec.Hurry == true { - return fmt.Errorf("the hurry flag may not be set on creation") + return field.Forbidden(specPath.Child("Hurry"), "the hurry flag may not be set on creation") } if w.Status.State != "" { - return fmt.Errorf("the status state may not be set") + return field.Forbidden(field.NewPath("Status").Child("State"), "the status state may not be set on creation") } return checkDirectives(w, &ValidatingRuleParser{}) @@ -100,8 +104,9 @@ func (w *Workflow) ValidateUpdate(old runtime.Object) error { return err } - if w.Spec.Hurry == true && w.Spec.DesiredState != StateTeardown.String() { - return fmt.Errorf("the hurry flag may only be set for %s", StateTeardown.String()) + if w.Spec.Hurry == true && w.Spec.DesiredState != StateTeardown { + s := fmt.Sprintf("the hurry flag may be set only in %s", StateTeardown) + return field.Invalid(field.NewPath("Spec").Child("Hurry"), w.Spec.Hurry, s) } // Check that immutable fields haven't changed. @@ -112,47 +117,42 @@ func (w *Workflow) ValidateUpdate(old runtime.Object) error { // Initial setup of the Workflow by the dws controller requires setting the status // state to proposal and adding a finalizer. 
- if oldWorkflow.Status.State == "" && w.Spec.DesiredState == StateProposal.String() { + if oldWorkflow.Status.State == "" && w.Spec.DesiredState == StateProposal { return nil } // Validate the elements in the Drivers array for i, driverStatus := range w.Status.Drivers { + + driverError := func(errString string) error { + return field.InternalError(field.NewPath("Status").Child("Drivers").Index(i), fmt.Errorf(errString)) + } + // Elements with watchStates not equal to the current state should not change if driverStatus.WatchState != oldWorkflow.Status.State { if !reflect.DeepEqual(oldWorkflow.Status.Drivers[i], driverStatus) { - return fmt.Errorf("driver entry for non-current state cannot be changed") + return driverError("driver entry for non-current state cannot be changed") } - continue } if driverStatus.Completed == true { if driverStatus.Status != StatusCompleted { - return fmt.Errorf("driver cannot be completed without status=Completed") + return driverError("driver cannot be completed without status=Completed") } if driverStatus.Error != "" { - return fmt.Errorf("driver cannot be completed when error is present") + return driverError("driver cannot be completed when error is present") } } else { if oldWorkflow.Status.Drivers[i].Completed == true { - return fmt.Errorf("driver cannot change from completed state") + return driverError("driver cannot change from completed state") } } } - // New state is the desired state in the Spec - newState, err := GetWorkflowState(w.Spec.DesiredState) - if err != nil { - return fmt.Errorf("unable to parse DesiredState: %w", err) - } - - // Old state is the current state we're on. This is found in the status - oldState, err := GetWorkflowState(oldWorkflow.Status.State) - if err != nil { - return fmt.Errorf("unable to find current state: %w", err) - } + oldState := oldWorkflow.Status.State + newState := w.Spec.DesiredState // Progressing to teardown is allowed at any time, and changes to the // Workflow that don't change the state are fine too (immutable fields were @@ -161,16 +161,17 @@ func (w *Workflow) ValidateUpdate(old runtime.Object) error { return nil } - if newState < oldState { - return fmt.Errorf("state cannot progress backwards") + // Error checks + if oldState.after(newState) { + return field.Invalid(field.NewPath("Spec").Child("DesiredState"), w.Spec.DesiredState, "DesiredState cannot progress backwards") } - if newState > oldState+1 { - return fmt.Errorf("states cannot be skipped") + if oldState.next() != newState { + return field.Invalid(field.NewPath("Spec").Child("DesiredState"), w.Spec.DesiredState, "states cannot be skipped") } if !oldWorkflow.Status.Ready { - return fmt.Errorf("current desired state not yet achieved") + return field.Invalid(field.NewPath("Status").Child("State"), oldWorkflow.Status.State, "current desired state not yet achieved") } return nil @@ -181,25 +182,30 @@ func (w *Workflow) ValidateDelete() error { return nil } -func validateWorkflowImmutable(a *Workflow, b *Workflow) error { - if a.Spec.WLMID != b.Spec.WLMID { - return fmt.Errorf("cannot change immutable field WLMID") +func validateWorkflowImmutable(newWorkflow *Workflow, oldWorkflow *Workflow) error { + + immutableError := func(childField string) error { + return field.Forbidden(field.NewPath("Spec").Child(childField), "field is immutable") + } + + if newWorkflow.Spec.WLMID != oldWorkflow.Spec.WLMID { + return immutableError("WLMID") } - if a.Spec.JobID != b.Spec.JobID { - return fmt.Errorf("cannot change immutable field JobID") + if 
newWorkflow.Spec.JobID != oldWorkflow.Spec.JobID { + return immutableError("JobID") } - if a.Spec.UserID != b.Spec.UserID { - return fmt.Errorf("cannot change immutable field UserID") + if newWorkflow.Spec.UserID != oldWorkflow.Spec.UserID { + return immutableError("UserID") } - if a.Spec.GroupID != b.Spec.GroupID { - return fmt.Errorf("cannot change immutable field GroupID") + if newWorkflow.Spec.GroupID != oldWorkflow.Spec.GroupID { + return immutableError("GroupID") } - if !reflect.DeepEqual(a.Spec.DWDirectives, b.Spec.DWDirectives) { - return fmt.Errorf("cannot change immutable field DWDirectives") + if !reflect.DeepEqual(newWorkflow.Spec.DWDirectives, oldWorkflow.Spec.DWDirectives) { + return immutableError("DWDirectives") } return nil @@ -211,38 +217,16 @@ func checkDirectives(workflow *Workflow, ruleParser RuleParser) error { return nil } - err := ruleParser.ReadRules() - if err != nil { + if err := ruleParser.ReadRules(); err != nil { return err } - uniqueMap := make(map[string]bool) - - for i, directive := range workflow.Spec.DWDirectives { - validDirective := false - for _, rule := range ruleParser.GetRuleList() { - // validate #DW syntax - const rejectUnsupportedCommands bool = true - - valid, err := dwdparse.ValidateDWDirective(rule, directive, uniqueMap, rejectUnsupportedCommands) - if err != nil { - // #DW parser validation failed - workflowlog.Info("dwDirective validation failed", "Error", err) - return err - } - - if valid { - validDirective = true - ruleParser.MatchedDirective(workflow, rule.WatchStates, i, rule.DriverLabel) - } - } - - if !validDirective { - return fmt.Errorf("invalid directive found: '%s'", directive) - } + // Forward the rule and directive index to the rule parsers matched directive handling + onValidDirectiveFunc := func(index int, rule dwdparse.DWDirectiveRuleSpec) { + ruleParser.MatchedDirective(workflow, rule.WatchStates, index, rule.DriverLabel) } - return nil + return dwdparse.Validate(ruleParser.GetRuleList(), workflow.Spec.DWDirectives, onValidDirectiveFunc) } // RuleParser defines the interface a rule parser must provide @@ -305,12 +289,12 @@ type MutatingRuleParser struct { // MatchedDirective updates the driver status entries to indicate driver availability func (r *MutatingRuleParser) MatchedDirective(workflow *Workflow, watchStates string, index int, label string) { - if watchStates == "" { + if len(watchStates) == 0 { // Nothing to do return } - registrationMap := make(map[string]bool) + registrationMap := make(map[WorkflowState]bool) // Build a map of previously registered states for this driver for _, s := range workflow.Status.Drivers { if s.DWDIndex != index { @@ -326,6 +310,8 @@ func (r *MutatingRuleParser) MatchedDirective(workflow *Workflow, watchStates st // Update driver status entries to indicate driver availability for _, state := range strings.Split(watchStates, ",") { + state := WorkflowState(state) + // If this driver is already registered for this directive, skip it if registrationMap[state] { continue @@ -339,7 +325,7 @@ func (r *MutatingRuleParser) MatchedDirective(workflow *Workflow, watchStates st Status: StatusPending, } workflow.Status.Drivers = append(workflow.Status.Drivers, driverStatus) - workflowlog.Info("Registering driver", "Driver", driverStatus.DriverID, "Watch state", driverStatus.WatchState) + workflowlog.Info("Registering driver", "Driver", driverStatus.DriverID, "Watch state", state) } } diff --git a/vendor/github.com/HewlettPackard/dws/api/v1alpha1/zz_generated.deepcopy.go 
b/vendor/github.com/HewlettPackard/dws/api/v1alpha1/zz_generated.deepcopy.go index 3913c095..a91fa935 100644 --- a/vendor/github.com/HewlettPackard/dws/api/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/HewlettPackard/dws/api/v1alpha1/zz_generated.deepcopy.go @@ -2,7 +2,7 @@ // +build !ignore_autogenerated /* - * Copyright 2022 Hewlett Packard Enterprise Development LP + * Copyright 2023 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -922,7 +922,8 @@ func (in *Storage) DeepCopyInto(out *Storage) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Data.DeepCopyInto(&out.Data) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Storage. @@ -1007,29 +1008,6 @@ func (in *StorageBreakdown) DeepCopy() *StorageBreakdown { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StorageData) DeepCopyInto(out *StorageData) { - *out = *in - if in.Devices != nil { - in, out := &in.Devices, &out.Devices - *out = make([]StorageDevice, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.Access.DeepCopyInto(&out.Access) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageData. -func (in *StorageData) DeepCopy() *StorageData { - if in == nil { - return nil - } - out := new(StorageData) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StorageDevice) DeepCopyInto(out *StorageDevice) { *out = *in @@ -1083,90 +1061,39 @@ func (in *StorageList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StoragePool) DeepCopyInto(out *StoragePool) { +func (in *StorageSpec) DeepCopyInto(out *StorageSpec) { *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - out.Status = in.Status } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StoragePool. -func (in *StoragePool) DeepCopy() *StoragePool { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageSpec. +func (in *StorageSpec) DeepCopy() *StorageSpec { if in == nil { return nil } - out := new(StoragePool) + out := new(StorageSpec) in.DeepCopyInto(out) return out } -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *StoragePool) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *StoragePoolList) DeepCopyInto(out *StoragePoolList) { +func (in *StorageStatus) DeepCopyInto(out *StorageStatus) { *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]StoragePool, len(*in)) + if in.Devices != nil { + in, out := &in.Devices, &out.Devices + *out = make([]StorageDevice, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } + in.Access.DeepCopyInto(&out.Access) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StoragePoolList. -func (in *StoragePoolList) DeepCopy() *StoragePoolList { - if in == nil { - return nil - } - out := new(StoragePoolList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *StoragePoolList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StoragePoolSpec) DeepCopyInto(out *StoragePoolSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StoragePoolSpec. -func (in *StoragePoolSpec) DeepCopy() *StoragePoolSpec { - if in == nil { - return nil - } - out := new(StoragePoolSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StoragePoolStatus) DeepCopyInto(out *StoragePoolStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StoragePoolStatus. -func (in *StoragePoolStatus) DeepCopy() *StoragePoolStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageStatus. 
+func (in *StorageStatus) DeepCopy() *StorageStatus { if in == nil { return nil } - out := new(StoragePoolStatus) + out := new(StorageStatus) in.DeepCopyInto(out) return out } diff --git a/vendor/github.com/HewlettPackard/dws/utils/dwdparse/dwdparse.go b/vendor/github.com/HewlettPackard/dws/utils/dwdparse/dwdparse.go index f0a27c35..8fd03bb7 100644 --- a/vendor/github.com/HewlettPackard/dws/utils/dwdparse/dwdparse.go +++ b/vendor/github.com/HewlettPackard/dws/utils/dwdparse/dwdparse.go @@ -20,7 +20,6 @@ package dwdparse import ( - "errors" "fmt" "regexp" "strconv" @@ -59,174 +58,142 @@ type DWDirectiveRuleSpec struct { RuleDefs []DWDirectiveRuleDef `json:"ruleDefs"` } -type dwUnsupportedCommandErr struct { - command string -} - -// NewUnsupportedCommandErr returns a reference to the unsupported command type -func NewUnsupportedCommandErr(command string) error { - return &dwUnsupportedCommandErr{command} -} +// BuildArgsMap builds a map of the DWDirective's arguments in the form: args["key"] = value +func BuildArgsMap(dwd string) (map[string]string, error) { -func (e *dwUnsupportedCommandErr) Error() string { - return fmt.Sprintf("Unsupported Command: '%s'", e.command) -} + dwdArgs := strings.Fields(dwd) -// IsUnsupportedCommand returns true if the error indicates that the command -// is unsupported -func IsUnsupportedCommand(err error) bool { - if err == nil { - return false + if len(dwdArgs) == 0 || dwdArgs[0] != "#DW" { + return nil, fmt.Errorf("missing '#DW' prefix in directive '%s'", dwd) } - _, ok := err.(*dwUnsupportedCommandErr) - return ok -} - -// BuildRulesMap builds a map of the DWDirectives argument parser rules for the specified command -func BuildRulesMap(rule DWDirectiveRuleSpec, cmd string) (map[string]DWDirectiveRuleDef, error) { - rulesMap := make(map[string]DWDirectiveRuleDef) - for _, rd := range rule.RuleDefs { - rulesMap[rd.Key] = rd - } + argsMap := make(map[string]string) + argsMap["command"] = dwdArgs[1] + for i := 2; i < len(dwdArgs); i++ { + keyValue := strings.Split(dwdArgs[i], "=") + + // Don't allow repeated arguments + _, ok := argsMap[keyValue[0]] + if ok { + return nil, fmt.Errorf("repeated argument '%s' in directive '%s'", keyValue[0], dwd) + } - if len(rulesMap) == 0 { - return nil, NewUnsupportedCommandErr(cmd) + if len(keyValue) == 1 { + argsMap[keyValue[0]] = "true" + } else if len(keyValue) == 2 { + argsMap[keyValue[0]] = keyValue[1] + } else { + keyValue := strings.SplitN(dwdArgs[i], "=", 2) + argsMap[keyValue[0]] = keyValue[1] + } } - return rulesMap, nil + return argsMap, nil } -// BuildArgsMap builds a map of the DWDirective's arguments in the form: args["key"] = value -func BuildArgsMap(dwd string) (map[string]string, error) { - argsMap := make(map[string]string) - dwdArgs := strings.Fields(dwd) +// Compile this regex outside the loop for better performance. 
+var boolMatcher = regexp.MustCompile(`(?i)^(true|false)$`) // (?i) -> case-insensitive comparison

-	if len(dwdArgs) == 0 {
-		return nil, fmt.Errorf("Invalid format for directive '%s'", dwd)
+// ValidateArgs validates a map of arguments against the rule specification
+func ValidateArgs(spec DWDirectiveRuleSpec, args map[string]string, uniqueMap map[string]bool) error {
+
+	command, found := args["command"]
+	if !found {
+		return fmt.Errorf("no command in arguments")
 	}

-	if dwdArgs[0] == "#DW" {
-		argsMap["command"] = dwdArgs[1]
-		for i := 2; i < len(dwdArgs); i++ {
-			keyValue := strings.Split(dwdArgs[i], "=")
+	if command != spec.Command {
+		return fmt.Errorf("command '%s' does not match rule '%s'", command, spec.Command)
+	}

-			// Don't allow repeated arguments
-			_, ok := argsMap[keyValue[0]]
-			if ok {
-				return nil, errors.New("repeated argument in directive: " + keyValue[0])
+	// Create a map that maps a directive rule definition to an argument that correctly matches it
+	// key: DWDirectiveRule value: argument that matches that rule
+	// Required to check that all DWDirectiveRuleDef's have been met
+	argToRuleMap := map[*DWDirectiveRuleDef]string{}
+
+	findRuleDefinition := func(key string) (*DWDirectiveRuleDef, error) {
+		for index, rule := range spec.RuleDefs {
+			re, err := regexp.Compile(rule.Key)
+			if err != nil {
+				return nil, fmt.Errorf("invalid rule regular expression '%s'", rule.Key)
 			}

-			if len(keyValue) == 1 {
-				argsMap[keyValue[0]] = "true"
-			} else if len(keyValue) == 2 {
-				argsMap[keyValue[0]] = keyValue[1]
-			} else {
-				keyValue := strings.SplitN(dwdArgs[i], "=", 2)
-				argsMap[keyValue[0]] = keyValue[1]
+			if re.MatchString(key) {
+				return &spec.RuleDefs[index], nil
 			}
 		}
-	} else {
-		return nil, errors.New("missing #DW in directive")
+
+		return nil, fmt.Errorf("unsupported argument '%s'", key)
 	}

-	return argsMap, nil
-}
-
-// ValidateArgs validates a map of arguments against the rules
-// For cases where an unknown command may be allowed because there may be other handlers for that command
-//
-// failUnknownCommand = false
-func ValidateArgs(args map[string]string, rule DWDirectiveRuleSpec, uniqueMap map[string]bool, failUnknownCommand bool) error {
-	command := args["command"]
+	// Iterate over all arguments and validate each based on the associated rule
+	for k, v := range args {
+		if k == "command" {
+			continue
+		}

-	// Determine the rules map for command
-	rulesMap, err := BuildRulesMap(rule, command)
-	if err != nil {
-		// If the command is unsupported and we are supposed to fail in that case return error.
-		// Otherwise just return nil to effectively skip the #DW
-		// for info on errors.As() below see:
-		// https://stackoverflow.com/questions/62441960/error-wrap-unwrap-type-checking-with-errors-is#62442136
-		var unsupportedCommand *dwUnsupportedCommandErr
-		if failUnknownCommand && errors.As(err, &unsupportedCommand) {
+		rule, err := findRuleDefinition(k)
+		if err != nil {
 			return err
 		}
-		return nil
-	}

-	// Compile this regex outside the loop for better performance.
-	var boolMatcher = regexp.MustCompile(`(?i)^(true|false)$`) // (?i) -> case-insensitve comparison
-
-	// Create a map that maps a directive rule definition to an argument that correctly matches it
-	// key: DWDirectiveRule value: argument that matches that rule
-	// Required to check that all DWDirectiveRuleDef's have been met
-	argToRuleMap := map[DWDirectiveRuleDef]string{}
+		if rule.IsValueRequired && len(v) == 0 {
+			return fmt.Errorf("argument '%s' requires value", k)
+		}

-	// Iterate over all arguments and validate each based on the associated rule
-	for k, v := range args {
-		if k != "command" {
-			rule, found := rulesMap[k]
-			if !found {
-				return errors.New("unsupported argument - " + k)
+		switch rule.Type {
+		case "integer":
+			i, err := strconv.Atoi(v)
+			if err != nil {
+				return fmt.Errorf("argument '%s' invalid integer '%s'", k, v)
 			}
-			if rule.IsValueRequired && len(v) == 0 {
-				return errors.New("malformed keyword[=value]: " + k + "=" + v)
+			if rule.Max != 0 && i > rule.Max {
+				return fmt.Errorf("argument '%s' specified integer value %d is greater than maximum value %d", k, i, rule.Max)
 			}
-			switch rule.Type {
-			case "integer":
-				// i,err := strconv.ParseInt(v, 10, 64)
-				i, err := strconv.Atoi(v)
-				if err != nil {
-					return errors.New("invalid integer argument: " + k + "=" + v)
-				}
-				if rule.Max != 0 && i > rule.Max {
-					return errors.New("specified integer exceeds maximum " + strconv.Itoa(rule.Max) + ": " + k + "=" + v)
-				}
-				if rule.Min != 0 && i < rule.Min {
-					return errors.New("specified integer smaller than minimum " + strconv.Itoa(rule.Min) + ": " + k + "=" + v)
-				}
-			case "bool":
-				if rule.Pattern != "" {
-					isok := boolMatcher.MatchString(v)
-					if !isok {
-						return errors.New("invalid bool argument: " + k + "=" + v)
-					}
-				}
-			case "string":
-				if rule.Pattern != "" {
-					isok, err := regexp.MatchString(rule.Pattern, v)
-					if !isok {
-						if err != nil {
-							return errors.New("invalid regexp in rule: " + rule.Pattern)
-						}
-						return errors.New("invalid argument: " + k + "=" + v)
-					}
+			if rule.Min != 0 && i < rule.Min {
+				return fmt.Errorf("argument '%s' specified integer value %d is less than minimum value %d", k, i, rule.Min)
+			}
+		case "bool":
+			if rule.Pattern != "" {
+				if !boolMatcher.MatchString(v) {
+					return fmt.Errorf("argument '%s' invalid boolean '%s'", k, v)
 				}
-			default:
-				return errors.New("unsupported value type: " + rule.Type)
 			}
+		case "string":
+			if rule.Pattern != "" {
+				re, err := regexp.Compile(rule.Pattern)
+				if err != nil {
+					return fmt.Errorf("invalid regular expression '%s'", rule.Pattern)
+				}

-			if rule.UniqueWithin != "" {
-				_, ok := uniqueMap[rule.UniqueWithin+"/"+v]
-				if ok {
-					return fmt.Errorf("Value '%s' must be unique within '%s'", v, rule.UniqueWithin)
+				if !re.MatchString(v) {
+					return fmt.Errorf("argument '%s' invalid string '%s'", k, v)
 				}
+			}
+		default:
+			return fmt.Errorf("unsupported rule type '%s'", rule.Type)
+		}

-				uniqueMap[rule.UniqueWithin+"/"+v] = true
+		if rule.UniqueWithin != "" {
+			_, ok := uniqueMap[rule.UniqueWithin+"/"+v]
+			if ok {
+				return fmt.Errorf("value '%s' must be unique within '%s'", v, rule.UniqueWithin)
 			}
-			// NOTE: We know that we don't have repeated arguments here because the arguments
-			// come to us in a map indexed by the argment name.
-			argToRuleMap[rule] = k
+			uniqueMap[rule.UniqueWithin+"/"+v] = true
 		}
+
+		// NOTE: We know that we don't have repeated arguments here because the arguments
+		// come to us in a map indexed by the argument name.
+ argToRuleMap[rule] = k } // Iterate over the rules to ensure all required rules have an argument - for k, v := range rulesMap { - // Ensure that each required rule has an argument - if v.IsRequired { - _, ok := argToRuleMap[v] - if !ok { - return errors.New("missing argument: " + k) + for index := range spec.RuleDefs { + rule := &spec.RuleDefs[index] + if rule.IsRequired { + if _, found := argToRuleMap[rule]; !found { + return fmt.Errorf("missing required argument '%v'", rule.Key) } } } @@ -234,8 +201,42 @@ func ValidateArgs(args map[string]string, rule DWDirectiveRuleSpec, uniqueMap ma return nil } -// ValidateDWDirective validates a set of #DW directives against a specified rule set -func ValidateDWDirective(rule DWDirectiveRuleSpec, dwd string, uniqueMap map[string]bool, failUnknownCommand bool) (bool, error) { +// Validate a list of directives against the supplied rules. When a directive is valid +// for a particular rule, the `onValidDirectiveFunc` function is called. +func Validate(rules []DWDirectiveRuleSpec, directives []string, onValidDirectiveFunc func(index int, rule DWDirectiveRuleSpec)) error { + + // Create a map to track argument uniqueness within the directives for + // rules that contain `UniqueWithin` + uniqueMap := make(map[string]bool) + + for index, directive := range directives { + + // A directive is validated against all rules; any one rule that is valid for a directive + // makes that directive valid. + validDirective := false + + for _, rule := range rules { + valid, err := validateDWDirective(rule, directive, uniqueMap) + if err != nil { + return err + } + + if valid { + validDirective = true + onValidDirectiveFunc(index, rule) + } + } + + if !validDirective { + return fmt.Errorf("invalid directive '%s'", directive) + } + } + + return nil +} + +// validateDWDirective validates an individual directive against a rule +func validateDWDirective(rule DWDirectiveRuleSpec, dwd string, uniqueMap map[string]bool) (bool, error) { // Build a map of the #DW commands and arguments argsMap, err := BuildArgsMap(dwd) @@ -243,19 +244,11 @@ func ValidateDWDirective(rule DWDirectiveRuleSpec, dwd string, uniqueMap map[str return false, err } - // If the command doesn't match... if argsMap["command"] != rule.Command { - // If we need to fail unknown commands, return invalid command - if failUnknownCommand { - return false, nil - } - - // Otherwise, we may have a new command that our code doesn't yet know - // Don't bother checking the rest - return true, nil + return false, nil } - err = ValidateArgs(argsMap, rule, uniqueMap, failUnknownCommand) + err = ValidateArgs(rule, argsMap, uniqueMap) if err != nil { return false, err } diff --git a/vendor/github.com/HewlettPackard/dws/utils/dwdparse/zz_generated.deepcopy.go b/vendor/github.com/HewlettPackard/dws/utils/dwdparse/zz_generated.deepcopy.go index 5d73dae9..d27486be 100644 --- a/vendor/github.com/HewlettPackard/dws/utils/dwdparse/zz_generated.deepcopy.go +++ b/vendor/github.com/HewlettPackard/dws/utils/dwdparse/zz_generated.deepcopy.go @@ -2,7 +2,7 @@ // +build !ignore_autogenerated /* - * Copyright 2022 Hewlett Packard Enterprise Development LP + * Copyright 2023 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. 
* * The entirety of this work is licensed under the Apache License, diff --git a/vendor/github.com/HewlettPackard/dws/utils/updater/status_updater.go b/vendor/github.com/HewlettPackard/dws/utils/updater/status_updater.go index ad3e2859..e80453ab 100644 --- a/vendor/github.com/HewlettPackard/dws/utils/updater/status_updater.go +++ b/vendor/github.com/HewlettPackard/dws/utils/updater/status_updater.go @@ -109,5 +109,5 @@ func (updater *statusUpdater[S]) close(ctx context.Context, c clientUpdater, err } - return nil + return err } diff --git a/vendor/github.com/NearNodeFlash/nnf-ec/pkg/rfsf/pkg/models/model_storage_v1_9_0_storage.go b/vendor/github.com/NearNodeFlash/nnf-ec/pkg/rfsf/pkg/models/model_storage_v1_9_0_storage.go index 6bb5916b..f8600ecd 100644 --- a/vendor/github.com/NearNodeFlash/nnf-ec/pkg/rfsf/pkg/models/model_storage_v1_9_0_storage.go +++ b/vendor/github.com/NearNodeFlash/nnf-ec/pkg/rfsf/pkg/models/model_storage_v1_9_0_storage.go @@ -51,6 +51,8 @@ type StorageV190Storage struct { Links StorageV190Links `json:"Links,omitempty"` + Location ResourceLocation `json:"Location,omitempty"` + // The name of the resource or array member. Name string `json:"Name"` diff --git a/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnf_access_types.go b/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnf_access_types.go index c7d98458..f45c8acf 100644 --- a/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnf_access_types.go +++ b/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnf_access_types.go @@ -35,8 +35,9 @@ type NnfAccessSpec struct { // TeardownState is the desired state of the workflow for this NNF Access resource to // be torn down and deleted. - // +kubebuilder:validation:Enum=data_in;pre_run;post_run;data_out - TeardownState string `json:"teardownState"` + // +kubebuilder:validation:Enum:=PreRun;PostRun;Teardown + // +kubebuilder:validation:Type:=string + TeardownState dwsv1alpha1.WorkflowState `json:"teardownState"` // Target specifies which storage targets the client should mount // - single: Only one of the storage the client can access @@ -44,6 +45,12 @@ type NnfAccessSpec struct { // +kubebuilder:validation:Enum=single;all Target string `json:"target"` + // UserID for the new mount. Currently only used for raw + UserID uint32 `json:"userID"` + + // GroupID for the new mount. Currently only used for raw + GroupID uint32 `json:"groupID"` + // ClientReference is for a client resource. 
(DWS) Computes is the only client // resource type currently supported ClientReference corev1.ObjectReference `json:"clientReference,omitempty"` diff --git a/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnf_datamovement_types.go b/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnf_datamovement_types.go index 14a4b5d7..f9512ef8 100644 --- a/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnf_datamovement_types.go +++ b/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnf_datamovement_types.go @@ -20,6 +20,7 @@ package v1alpha1 import ( + dwsv1alpha1 "github.com/HewlettPackard/dws/api/v1alpha1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -179,13 +180,13 @@ const ( DataMovementTeardownStateLabel = "nnf.cray.hpe.com/teardown_state" ) -func AddDataMovementTeardownStateLabel(object metav1.Object, state string) { +func AddDataMovementTeardownStateLabel(object metav1.Object, state dwsv1alpha1.WorkflowState) { labels := object.GetLabels() if labels == nil { labels = make(map[string]string) } - labels[DataMovementTeardownStateLabel] = state + labels[DataMovementTeardownStateLabel] = string(state) object.SetLabels(labels) } diff --git a/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnf_node_storage_types.go b/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnf_node_storage_types.go index 27098351..05a49f4b 100644 --- a/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnf_node_storage_types.go +++ b/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnf_node_storage_types.go @@ -37,6 +37,17 @@ type NnfNodeStorageSpec struct { // +kubebuilder:validation:Minimum:=0 Count int `json:"count"` + // User ID for file system + UserID uint32 `json:"userID"` + + // Group ID for file system + GroupID uint32 `json:"groupID"` + + // Set the owner and group permissions specified by UserID and GroupID. This is for + // Lustre file systems only, and should be set only after all Lustre targets are created. + // +kubebuilder:default:=false + SetOwnerGroup bool `json:"setOwnerGroup"` + // Capacity defines the capacity, in bytes, of this storage specification. The NNF Node itself // may split the storage among the available drives operating in the NNF Node. Capacity int64 `json:"capacity,omitempty"` @@ -100,6 +111,10 @@ type NnfNodeStorageStatus struct { // LustreStorageStatus describes the Lustre targets created here. LustreStorage LustreStorageStatus `json:"lustreStorage,omitempty"` + + // OwnerGroupStatus is the status of the operation for setting the owner and group + // of a file system + OwnerGroupStatus NnfResourceStatusType `json:"ownerGroupStatus,omitempty"` } // NnfNodeStorageNVMeStatus provides a way to uniquely identify an NVMe namespace diff --git a/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnf_node_types.go b/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnf_node_types.go index 658ddecc..75ebafab 100644 --- a/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnf_node_types.go +++ b/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnf_node_types.go @@ -52,6 +52,9 @@ type NnfNodeStatus struct { Health NnfResourceHealthType `json:"health,omitempty"` + // Fenced is true when the NNF Node is fenced by the STONITH agent, and false otherwise. 
+ Fenced bool `json:"fenced,omitempty"` + Capacity int64 `json:"capacity,omitempty"` CapacityAllocated int64 `json:"capacityAllocated,omitempty"` @@ -97,6 +100,7 @@ type NnfDriveStatus struct { //+kubebuilder:printcolumn:name="HEALTH",type="string",JSONPath=".status.health",description="Health of node" //+kubebuilder:printcolumn:name="STATUS",type="string",JSONPath=".status.status",description="Current status of node" //+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +//+kubebuilder:printcolumn:name="POD",type="string",JSONPath=".spec.pod",description="Parent pod name",priority=1 // NnfNode is the Schema for the NnfNode API type NnfNode struct { diff --git a/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnf_resource_status_type.go b/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnf_resource_status_type.go index 310f6e97..34335763 100644 --- a/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnf_resource_status_type.go +++ b/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnf_resource_status_type.go @@ -20,6 +20,8 @@ package v1alpha1 import ( + dwsv1alpha1 "github.com/HewlettPackard/dws/api/v1alpha1" + sf "github.com/NearNodeFlash/nnf-ec/pkg/rfsf/pkg/models" ) @@ -93,6 +95,25 @@ func (rst NnfResourceStatusType) UpdateIfWorseThan(status *NnfResourceStatusType } } +func (rst NnfResourceStatusType) ConvertToDWSResourceStatus() dwsv1alpha1.ResourceStatus { + switch rst { + case ResourceStarting: + return dwsv1alpha1.StartingStatus + case ResourceReady: + return dwsv1alpha1.ReadyStatus + case ResourceDisabled: + return dwsv1alpha1.DisabledStatus + case ResourceNotPresent: + return dwsv1alpha1.NotPresentStatus + case ResourceOffline: + return dwsv1alpha1.OfflineStatus + case ResourceFailed: + return dwsv1alpha1.FailedStatus + default: + return dwsv1alpha1.UnknownStatus + } +} + // StaticResourceStatus will convert a Swordfish ResourceStatus to the NNF Resource Status. func StaticResourceStatus(s sf.ResourceStatus) NnfResourceStatusType { switch s.State { diff --git a/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnf_storage_types.go b/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnf_storage_types.go index a5eac279..97faa8be 100644 --- a/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnf_storage_types.go +++ b/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnf_storage_types.go @@ -85,6 +85,12 @@ type NnfStorageSpec struct { // +kubebuilder:default:=raw FileSystemType string `json:"fileSystemType"` + // User ID for file system + UserID uint32 `json:"userID"` + + // Group ID for file system + GroupID uint32 `json:"groupID"` + // AllocationSets is a list of different types of storage allocations to make. Each // AllocationSet describes an entire allocation spanning multiple Rabbits. 
For example, // an AllocationSet could be all of the OSTs in a Lustre filesystem, or all of the raw @@ -94,10 +100,10 @@ type NnfStorageSpec struct { // NnfStorageAllocationSetStatus contains the status information for an allocation set type NnfStorageAllocationSetStatus struct { - // Status reflects the status of this NNF Storage + // Status reflects the status of this allocation set Status NnfResourceStatusType `json:"status,omitempty"` - // Health reflects the health of this NNF Storage + // Health reflects the health of this allocation set Health NnfResourceHealthType `json:"health,omitempty"` // Error is the human readable error string @@ -121,6 +127,9 @@ type NnfStorageStatus struct { dwsv1alpha1.ResourceError `json:",inline"` + // Status reflects the status of this NNF Storage + Status NnfResourceStatusType `json:"status,omitempty"` + // TODO: Conditions } diff --git a/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnfcontainerprofile_types.go b/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnfcontainerprofile_types.go new file mode 100644 index 00000000..ee575c9f --- /dev/null +++ b/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnfcontainerprofile_types.go @@ -0,0 +1,86 @@ +/* + * Copyright 2023 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + ContainerLabel = "nnf.cray.hpe.com/container" +) + +// NnfContainerProfileSpec defines the desired state of NnfContainerProfile +type NnfContainerProfileData struct { + // Pinned is true if this instance is an immutable copy + // +kubebuilder:default:=false + Pinned bool `json:"pinned,omitempty"` + + // List of possible filesystems supported by this container profile + Storages []NnfContainerProfileStorage `json:"storages,omitempty"` + + // Stop any containers after X seconds once a workflow has transitioned to PostRun. Defaults to 0. + // A value of 0 disables this behavior. + PostRunTimeoutSeconds int64 `json:"postRunTimeoutSeconds,omitempty"` + + // Specifies the number of times a container will be retried upon a failure. A new pod is deployed on each retry. + // Defaults to 6 by kubernetes itself and must be set. A value of 0 disables retries. 
+ // +kubebuilder:default:=6 + RetryLimit int32 `json:"retryLimit"` + + // Template defines the containers that will be created from container profile + Template corev1.PodTemplateSpec `json:"template"` +} + +// NnfContainerProfileStorage defines the mount point information that will be available to the +// container +type NnfContainerProfileStorage struct { + // Name specifies the name of the mounted filesystem; must match the user supplied #DW directive + Name string `json:"name"` + + // Optional designates that this filesystem is available to be mounted, but can be ignored by + // the user not supplying this filesystem in the #DW directives + //+kubebuilder:default:=false + Optional bool `json:"optional"` +} + +// +kubebuilder:object:root=true + +// NnfContainerProfile is the Schema for the nnfcontainerprofiles API +type NnfContainerProfile struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Data NnfContainerProfileData `json:"data,omitempty"` +} + +// +kubebuilder:object:root=true + +// NnfContainerProfileList contains a list of NnfContainerProfile +type NnfContainerProfileList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NnfContainerProfile `json:"items"` +} + +func init() { + SchemeBuilder.Register(&NnfContainerProfile{}, &NnfContainerProfileList{}) +} diff --git a/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnfstorageprofile_types.go b/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnfstorageprofile_types.go index 4bb48182..ae78131b 100644 --- a/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnfstorageprofile_types.go +++ b/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnfstorageprofile_types.go @@ -24,6 +24,27 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) +// NnfStorageProfileLustreCmdLines defines commandlines to use for mkfs, zpool, and other utilities. +type NnfStorageProfileLustreCmdLines struct { + // ZpoolCreate specifies the zpool create commandline, minus the "zpool create". + // This is where you may specify zpool create options, and the virtual device (vdev) such as + // "mirror", or "draid". See zpoolconcepts(7). + ZpoolCreate string `json:"zpoolCreate,omitempty"` + + // Mkfs specifies the mkfs.lustre commandline, minus the "mkfs.lustre". + // Use the --mkfsoptions argument to specify the zfs create options. See zfsprops(7). + // Use the --mountfsoptions argument to specify persistent mount options for the lustre targets. + Mkfs string `json:"mkfs,omitempty"` +} + +// NnfStorageProfileLustreMiscOptions defines options to use for the mount library, and other utilities. +type NnfStorageProfileLustreMiscOptions struct { + // MountTarget specifies mount options for the mount-utils library to mount a lustre target on the Rabbit. + // For persistent mount options for lustre targets, do not use this array; use the --mountfsoptions argument to mkfs.lustre instead. + // Use one array element per option. Do not prepend the options with "-o". 
+	MountTarget []string `json:"mountTarget,omitempty"`
+}
+
 // NnfStorageProfileLustreData defines the Lustre-specific configuration
 type NnfStorageProfileLustreData struct {
 	// CombinedMGTMDT indicates whether the MGT and MDT should be created on the same target device
@@ -43,27 +64,105 @@ type NnfStorageProfileLustreData struct {
 	// +kubebuilder:validation:Pattern:="^\\d+(KiB|KB|MiB|MB|GiB|GB|TiB|TB)$"
 	// +kubebuilder:default:="5GiB"
 	CapacityMDT string `json:"capacityMdt,omitempty"`
+
+	// ExclusiveMDT indicates that the MDT should not be colocated with any other target on the chosen server.
+	// +kubebuilder:default:=false
+	ExclusiveMDT bool `json:"exclusiveMdt,omitempty"`
+
+	// MgtCmdLines contains commands to create an MGT target.
+	MgtCmdLines NnfStorageProfileLustreCmdLines `json:"mgtCommandlines,omitempty"`
+
+	// MdtCmdLines contains commands to create an MDT target.
+	MdtCmdLines NnfStorageProfileLustreCmdLines `json:"mdtCommandlines,omitempty"`
+
+	// MgtMdtCmdLines contains commands to create a combined MGT/MDT target.
+	MgtMdtCmdLines NnfStorageProfileLustreCmdLines `json:"mgtMdtCommandlines,omitempty"`
+
+	// OstCmdLines contains commands to create an OST target.
+	OstCmdLines NnfStorageProfileLustreCmdLines `json:"ostCommandlines,omitempty"`
+
+	// MgtOptions contains options to use for libraries used for an MGT target.
+	MgtOptions NnfStorageProfileLustreMiscOptions `json:"mgtOptions,omitempty"`
+
+	// MdtOptions contains options to use for libraries used for an MDT target.
+	MdtOptions NnfStorageProfileLustreMiscOptions `json:"mdtOptions,omitempty"`
+
+	// MgtMdtOptions contains options to use for libraries used for a combined MGT/MDT target.
+	MgtMdtOptions NnfStorageProfileLustreMiscOptions `json:"mgtMdtOptions,omitempty"`
+
+	// OstOptions contains options to use for libraries used for an OST target.
+	OstOptions NnfStorageProfileLustreMiscOptions `json:"ostOptions,omitempty"`
+}
+
+// NnfStorageProfileCmdLines defines commandlines to use for mkfs, and other utilities.
+type NnfStorageProfileCmdLines struct {
+	// Mkfs specifies the mkfs commandline, minus the "mkfs".
+	Mkfs string `json:"mkfs,omitempty"`
+
+	// PvCreate specifies the pvcreate commandline, minus the "pvcreate".
+	PvCreate string `json:"pvCreate,omitempty"`
+
+	// VgCreate specifies the vgcreate commandline, minus the "vgcreate".
+	VgCreate string `json:"vgCreate,omitempty"`
+
+	// VgChange specifies the various vgchange commandlines, minus the "vgchange".
+	VgChange NnfStorageProfileLVMVgChangeCmdLines `json:"vgChange,omitempty"`
+
+	// VgRemove specifies the vgremove commandline, minus the "vgremove".
+	VgRemove string `json:"vgRemove,omitempty"`
+
+	// LvCreate specifies the lvcreate commandline, minus the "lvcreate".
+	LvCreate string `json:"lvCreate,omitempty"`
+
+	// LvRemove specifies the lvremove commandline, minus the "lvremove".
+	LvRemove string `json:"lvRemove,omitempty"`
+}
+
+// NnfStorageProfileLVMVgChangeCmdLines defines the vgchange commandlines for activation, deactivation, and lock start.
+type NnfStorageProfileLVMVgChangeCmdLines struct {
+	// The vgchange commandline for activation, minus the "vgchange" command
+	Activate string `json:"activate,omitempty"`
+
+	// The vgchange commandline for deactivation, minus the "vgchange" command
+	Deactivate string `json:"deactivate,omitempty"`
+
+	// The vgchange commandline for lockStart, minus the "vgchange" command
+	LockStart string `json:"lockStart,omitempty"`
+}
+
+// NnfStorageProfileMiscOptions defines options to use for the mount library, and other utilities.
+type NnfStorageProfileMiscOptions struct { + // MountRabbit specifies mount options for mounting on the Rabbit. + // Use one array element per option. Do not prepend the options with "-o". + MountRabbit []string `json:"mountRabbit,omitempty"` + + // MountCompute specifies mount options for mounting on the Compute. + // Use one array element per option. Do not prepend the options with "-o". + MountCompute []string `json:"mountCompute,omitempty"` } // NnfStorageProfileGFS2Data defines the GFS2-specific configuration type NnfStorageProfileGFS2Data struct { - // Placeholder - // +kubebuilder:default:=true - Placeholder bool `json:"placeholder,omitempty"` + // CmdLines contains commands to create volumes and filesystems. + CmdLines NnfStorageProfileCmdLines `json:"commandlines,omitempty"` + + // Options contains options for libraries. + Options NnfStorageProfileMiscOptions `json:"options,omitempty"` } // NnfStorageProfileXFSData defines the XFS-specific configuration type NnfStorageProfileXFSData struct { - // Placeholder - // +kubebuilder:default:=true - Placeholder bool `json:"placeholder,omitempty"` + // CmdLines contains commands to create volumes and filesystems. + CmdLines NnfStorageProfileCmdLines `json:"commandlines,omitempty"` + + // Options contains options for libraries. + Options NnfStorageProfileMiscOptions `json:"options,omitempty"` } // NnfStorageProfileRawData defines the Raw-specific configuration type NnfStorageProfileRawData struct { - // Placeholder - // +kubebuilder:default:=true - Placeholder bool `json:"placeholder,omitempty"` + // CmdLines contains commands to create volumes and filesystems. + CmdLines NnfStorageProfileCmdLines `json:"commandlines,omitempty"` } // NnfStorageProfileData defines the desired state of NnfStorageProfile diff --git a/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnfstorageprofile_webhook.go b/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnfstorageprofile_webhook.go index 4d0af747..0c02b29e 100644 --- a/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnfstorageprofile_webhook.go +++ b/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnfstorageprofile_webhook.go @@ -64,7 +64,7 @@ func (r *NnfStorageProfile) ValidateUpdate(old runtime.Object) error { // ownerReferences, and labels, but do not allow Data to be // updated. 
if !reflect.DeepEqual(r.Data, obj.Data) { - msg := "Update on pinned resource not allowed" + msg := "update on pinned resource not allowed" err := fmt.Errorf(msg) nnfstorageprofilelog.Error(err, "invalid") return err @@ -89,7 +89,7 @@ func (r *NnfStorageProfile) ValidateDelete() error { func (r *NnfStorageProfile) validateContent() error { if r.Data.Default && r.Data.Pinned { - return fmt.Errorf("The NnfStorageProfile cannot be both default and pinned.") + return fmt.Errorf("the NnfStorageProfile cannot be both default and pinned") } if err := r.validateContentLustre(); err != nil { return err @@ -99,7 +99,7 @@ func (r *NnfStorageProfile) validateContent() error { func (r *NnfStorageProfile) validateContentLustre() error { if r.Data.LustreStorage.CombinedMGTMDT && len(r.Data.LustreStorage.ExternalMGS) > 0 { - return fmt.Errorf("Cannot set both combinedMgtMdt and externalMgs") + return fmt.Errorf("cannot set both combinedMgtMdt and externalMgs") } return nil diff --git a/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/workflow_helpers.go b/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/workflow_helpers.go index d9cab084..0ea6b11e 100644 --- a/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/workflow_helpers.go +++ b/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/workflow_helpers.go @@ -25,4 +25,20 @@ const ( // to filter the child objects by the directive index and only delete the // resources for the directive being processed DirectiveIndexLabel = "nnf.cray.hpe.com/directive_index" + + // PinnedStorageProfileLabelName is a label applied to NnfStorage objects to show + // which pinned storage profile is being used. + PinnedStorageProfileLabelName = "nnf.cray.hpe.com/pinned_storage_profile_name" + + // PinnedStorageProfileLabelNameSpace is a label applied to NnfStorage objects to show + // which pinned storage profile is being used. + PinnedStorageProfileLabelNameSpace = "nnf.cray.hpe.com/pinned_storage_profile_namespace" + + // PinnedContainerProfileLabelName is a label applied to NnfStorage objects to show + // which pinned container profile is being used. + PinnedContainerProfileLabelName = "nnf.cray.hpe.com/pinned_container_profile_name" + + // PinnedContainerProfileLabelNameSpace is a label applied to NnfStorage objects to show + // which pinned container profile is being used. + PinnedContainerProfileLabelNameSpace = "nnf.cray.hpe.com/pinned_container_profile_namespace" ) diff --git a/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/zz_generated.deepcopy.go index 7ebccca6..49df07a3 100644 --- a/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/zz_generated.deepcopy.go @@ -2,7 +2,7 @@ // +build !ignore_autogenerated /* - * Copyright 2022 Hewlett Packard Enterprise Development LP + * Copyright 2023 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -171,6 +171,100 @@ func (in *NnfAccessStatus) DeepCopy() *NnfAccessStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NnfContainerProfile) DeepCopyInto(out *NnfContainerProfile) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Data.DeepCopyInto(&out.Data) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfContainerProfile. +func (in *NnfContainerProfile) DeepCopy() *NnfContainerProfile { + if in == nil { + return nil + } + out := new(NnfContainerProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfContainerProfile) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfContainerProfileData) DeepCopyInto(out *NnfContainerProfileData) { + *out = *in + if in.Storages != nil { + in, out := &in.Storages, &out.Storages + *out = make([]NnfContainerProfileStorage, len(*in)) + copy(*out, *in) + } + in.Template.DeepCopyInto(&out.Template) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfContainerProfileData. +func (in *NnfContainerProfileData) DeepCopy() *NnfContainerProfileData { + if in == nil { + return nil + } + out := new(NnfContainerProfileData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfContainerProfileList) DeepCopyInto(out *NnfContainerProfileList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfContainerProfile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfContainerProfileList. +func (in *NnfContainerProfileList) DeepCopy() *NnfContainerProfileList { + if in == nil { + return nil + } + out := new(NnfContainerProfileList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfContainerProfileList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfContainerProfileStorage) DeepCopyInto(out *NnfContainerProfileStorage) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfContainerProfileStorage. +func (in *NnfContainerProfileStorage) DeepCopy() *NnfContainerProfileStorage { + if in == nil { + return nil + } + out := new(NnfContainerProfileStorage) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfDataMovement) DeepCopyInto(out *NnfDataMovement) { *out = *in @@ -884,7 +978,7 @@ func (in *NnfStorageProfile) DeepCopyInto(out *NnfStorageProfile) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Data = in.Data + in.Data.DeepCopyInto(&out.Data) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfile. 
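A note on the generated-code churn here: NnfStorageProfile's DeepCopyInto switches from `out.Data = in.Data` to `in.Data.DeepCopyInto(&out.Data)` because the profile data now carries slice-backed options, and a plain value assignment would leave both copies sharing the same backing arrays. A tiny illustration, using a hypothetical stand-in type rather than the real nnf-sos API:

```
package main

import "fmt"

// Options stands in for the slice-bearing profile structs; the name is
// hypothetical, not part of the nnf-sos API.
type Options struct{ MountRabbit []string }

func main() {
	in := Options{MountRabbit: []string{"noatime"}}

	shallow := in // value copy, but both values share one backing array
	deep := Options{MountRabbit: append([]string(nil), in.MountRabbit...)} // what the generated DeepCopyInto does

	in.MountRabbit[0] = "relatime"
	fmt.Println(shallow.MountRabbit[0]) // "relatime" -- aliased with the original
	fmt.Println(deep.MountRabbit[0])    // "noatime"  -- independent copy
}
```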
@@ -905,12 +999,28 @@ func (in *NnfStorageProfile) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageProfileCmdLines) DeepCopyInto(out *NnfStorageProfileCmdLines) { + *out = *in + out.VgChange = in.VgChange +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileCmdLines. +func (in *NnfStorageProfileCmdLines) DeepCopy() *NnfStorageProfileCmdLines { + if in == nil { + return nil + } + out := new(NnfStorageProfileCmdLines) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfStorageProfileData) DeepCopyInto(out *NnfStorageProfileData) { *out = *in - out.LustreStorage = in.LustreStorage - out.GFS2Storage = in.GFS2Storage - out.XFSStorage = in.XFSStorage + in.LustreStorage.DeepCopyInto(&out.LustreStorage) + in.GFS2Storage.DeepCopyInto(&out.GFS2Storage) + in.XFSStorage.DeepCopyInto(&out.XFSStorage) out.RawStorage = in.RawStorage } @@ -927,6 +1037,8 @@ func (in *NnfStorageProfileData) DeepCopy() *NnfStorageProfileData { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfStorageProfileGFS2Data) DeepCopyInto(out *NnfStorageProfileGFS2Data) { *out = *in + out.CmdLines = in.CmdLines + in.Options.DeepCopyInto(&out.Options) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileGFS2Data. @@ -939,6 +1051,21 @@ func (in *NnfStorageProfileGFS2Data) DeepCopy() *NnfStorageProfileGFS2Data { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageProfileLVMVgChangeCmdLines) DeepCopyInto(out *NnfStorageProfileLVMVgChangeCmdLines) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileLVMVgChangeCmdLines. +func (in *NnfStorageProfileLVMVgChangeCmdLines) DeepCopy() *NnfStorageProfileLVMVgChangeCmdLines { + if in == nil { + return nil + } + out := new(NnfStorageProfileLVMVgChangeCmdLines) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfStorageProfileList) DeepCopyInto(out *NnfStorageProfileList) { *out = *in @@ -971,9 +1098,32 @@ func (in *NnfStorageProfileList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageProfileLustreCmdLines) DeepCopyInto(out *NnfStorageProfileLustreCmdLines) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileLustreCmdLines. +func (in *NnfStorageProfileLustreCmdLines) DeepCopy() *NnfStorageProfileLustreCmdLines { + if in == nil { + return nil + } + out := new(NnfStorageProfileLustreCmdLines) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NnfStorageProfileLustreData) DeepCopyInto(out *NnfStorageProfileLustreData) { *out = *in + out.MgtCmdLines = in.MgtCmdLines + out.MdtCmdLines = in.MdtCmdLines + out.MgtMdtCmdLines = in.MgtMdtCmdLines + out.OstCmdLines = in.OstCmdLines + in.MgtOptions.DeepCopyInto(&out.MgtOptions) + in.MdtOptions.DeepCopyInto(&out.MdtOptions) + in.MgtMdtOptions.DeepCopyInto(&out.MgtMdtOptions) + in.OstOptions.DeepCopyInto(&out.OstOptions) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileLustreData. @@ -986,9 +1136,55 @@ func (in *NnfStorageProfileLustreData) DeepCopy() *NnfStorageProfileLustreData { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageProfileLustreMiscOptions) DeepCopyInto(out *NnfStorageProfileLustreMiscOptions) { + *out = *in + if in.MountTarget != nil { + in, out := &in.MountTarget, &out.MountTarget + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileLustreMiscOptions. +func (in *NnfStorageProfileLustreMiscOptions) DeepCopy() *NnfStorageProfileLustreMiscOptions { + if in == nil { + return nil + } + out := new(NnfStorageProfileLustreMiscOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageProfileMiscOptions) DeepCopyInto(out *NnfStorageProfileMiscOptions) { + *out = *in + if in.MountRabbit != nil { + in, out := &in.MountRabbit, &out.MountRabbit + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.MountCompute != nil { + in, out := &in.MountCompute, &out.MountCompute + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileMiscOptions. +func (in *NnfStorageProfileMiscOptions) DeepCopy() *NnfStorageProfileMiscOptions { + if in == nil { + return nil + } + out := new(NnfStorageProfileMiscOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfStorageProfileRawData) DeepCopyInto(out *NnfStorageProfileRawData) { *out = *in + out.CmdLines = in.CmdLines } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileRawData. @@ -1004,6 +1200,8 @@ func (in *NnfStorageProfileRawData) DeepCopy() *NnfStorageProfileRawData { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfStorageProfileXFSData) DeepCopyInto(out *NnfStorageProfileXFSData) { *out = *in + out.CmdLines = in.CmdLines + in.Options.DeepCopyInto(&out.Options) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileXFSData. 
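To make the new commandline fields concrete: each one stores everything after the utility name, so a consumer is expected to prepend the utility back before executing. The sketch below is hypothetical; the actual invocation path is not part of this diff, and both the expand helper and the sample fragment are illustrative only.

```
package main

import (
	"fmt"
	"strings"
)

// CmdLines mirrors the shape of NnfStorageProfileCmdLines, for illustration.
type CmdLines struct {
	Mkfs     string // mkfs commandline, minus the leading "mkfs"
	PvCreate string // pvcreate commandline, minus the leading "pvcreate"
}

// expand rebuilds a full argv by prepending the utility name, matching the
// minus-the-utility convention documented on the profile fields.
func expand(utility, fragment string) []string {
	return append([]string{utility}, strings.Fields(fragment)...)
}

func main() {
	profile := CmdLines{Mkfs: "-q -t xfs /dev/example"} // sample fragment, illustrative only
	fmt.Println(expand("mkfs", profile.Mkfs))
	// [mkfs -q -t xfs /dev/example]
}
```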
diff --git a/vendor/github.com/NearNodeFlash/nnf-sos/config/crd/bases/nnf.cray.hpe.com_nnfaccesses.yaml b/vendor/github.com/NearNodeFlash/nnf-sos/config/crd/bases/nnf.cray.hpe.com_nnfaccesses.yaml index 9b9ea919..6c019eb3 100644 --- a/vendor/github.com/NearNodeFlash/nnf-sos/config/crd/bases/nnf.cray.hpe.com_nnfaccesses.yaml +++ b/vendor/github.com/NearNodeFlash/nnf-sos/config/crd/bases/nnf.cray.hpe.com_nnfaccesses.yaml @@ -96,6 +96,10 @@ spec: - mounted - unmounted type: string + groupID: + description: GroupID for the new mount. Currently only used for raw + format: int32 + type: integer mountPath: description: MountPath for the storage target on the client type: string @@ -149,19 +153,33 @@ spec: - all type: string teardownState: + allOf: + - enum: + - Proposal + - Setup + - DataIn + - PreRun + - PostRun + - DataOut + - Teardown + - enum: + - PreRun + - PostRun + - Teardown description: TeardownState is the desired state of the workflow for this NNF Access resource to be torn down and deleted. - enum: - - data_in - - pre_run - - post_run - - data_out type: string + userID: + description: UserID for the new mount. Currently only used for raw + format: int32 + type: integer required: - desiredState + - groupID - storageReference - target - teardownState + - userID type: object status: description: NnfAccessStatus defines the observed state of NnfAccess diff --git a/vendor/github.com/NearNodeFlash/nnf-sos/config/crd/bases/nnf.cray.hpe.com_nnfcontainerprofiles.yaml b/vendor/github.com/NearNodeFlash/nnf-sos/config/crd/bases/nnf.cray.hpe.com_nnfcontainerprofiles.yaml new file mode 100644 index 00000000..1d4e07b6 --- /dev/null +++ b/vendor/github.com/NearNodeFlash/nnf-sos/config/crd/bases/nnf.cray.hpe.com_nnfcontainerprofiles.yaml @@ -0,0 +1,7264 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.2 + creationTimestamp: null + name: nnfcontainerprofiles.nnf.cray.hpe.com +spec: + group: nnf.cray.hpe.com + names: + kind: NnfContainerProfile + listKind: NnfContainerProfileList + plural: nnfcontainerprofiles + singular: nnfcontainerprofile + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: NnfContainerProfile is the Schema for the nnfcontainerprofiles + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + data: + description: NnfContainerProfileSpec defines the desired state of NnfContainerProfile + properties: + pinned: + default: false + description: Pinned is true if this instance is an immutable copy + type: boolean + postRunTimeoutSeconds: + description: Stop any containers after X seconds once a workflow has + transitioned to PostRun. Defaults to 0. A value of 0 disables this + behavior. + format: int64 + type: integer + retryLimit: + default: 6 + description: Specifies the number of times a container will be retried + upon a failure. A new pod is deployed on each retry. Defaults to + 6 by kubernetes itself and must be set. A value of 0 disables retries. 
+ format: int32 + type: integer + storages: + description: List of possible filesystems supported by this container + profile + items: + description: NnfContainerProfileStorage defines the mount point + information that will be available to the container + properties: + name: + description: 'Name specifies the name of the mounted filesystem; + must match the user supplied #DW directive' + type: string + optional: + default: false + description: 'Optional designates that this filesystem is available + to be mounted, but can be ignored by the user not supplying + this filesystem in the #DW directives' + type: boolean + required: + - name + - optional + type: object + type: array + template: + description: Template defines the containers that will be created + from container profile + properties: + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + type: object + spec: + description: 'Specification of the desired behavior of the pod. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + activeDeadlineSeconds: + description: Optional duration in seconds the pod may be active + on the node relative to StartTime before the system will + actively try to mark it failed and kill associated containers. + Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the affinity expressions + specified by this field, but it may choose a node + that violates one or more of the expressions. The + node that is most preferred is the one with the + greatest sum of weights, i.e. for each node that + meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements + of this field and adding "weight" to the sum if + the node matches the corresponding matchExpressions; + the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term + matches all objects with implicit weight 0 (i.e. + it's a no-op). A null preferred scheduling term + matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, the + values array must have a single + element, which will be interpreted + as an integer. 
This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, the + values array must have a single + element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified + by this field are not met at scheduling time, the + pod will not be scheduled onto the node. If the + affinity requirements specified by this field cease + to be met at some point during pod execution (e.g. + due to an update), the system may or may not try + to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: A null or empty node selector term + matches no objects. The requirements of them + are ANDed. The TopologySelectorTerm type implements + a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, the + values array must have a single + element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. 
Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, the + values array must have a single + element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the affinity expressions + specified by this field, but it may choose a node + that violates one or more of the expressions. The + node that is most preferred is the one with the + greatest sum of weights, i.e. for each node that + meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements + of this field and adding "weight" to the sum if + the node has pods which matches the corresponding + podAffinityTerm; the node(s) with the highest sum + are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of + resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of + {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set + of namespaces that the term applies to. + The term is applied to the union of the + namespaces selected by this field and + the ones listed in the namespaces field. 
+ null selector and null or empty namespaces + list means "this pod's namespace". An + empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of + {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static + list of namespace names that the term + applies to. The term is applied to the + union of the namespaces listed in this + field and the ones selected by namespaceSelector. + null or empty namespaces list and null + namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose + value of the label with key topologyKey + matches that of any node on which any + of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching + the corresponding podAffinityTerm, in the + range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified + by this field are not met at scheduling time, the + pod will not be scheduled onto the node. If the + affinity requirements specified by this field cease + to be met at some point during pod execution (e.g. + due to a pod label update), the system may or may + not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes + corresponding to each podAffinityTerm are intersected, + i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those + matching the labelSelector relative to the given + namespace(s)) that this pod should be co-located + (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node + whose value of the label with key + matches that of any node on which a pod of the + set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by + this field and the ones listed in the namespaces + field. null selector and null or empty namespaces + list means "this pod's namespace". An empty + selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. + The term is applied to the union of the namespaces + listed in this field and the ones selected + by namespaceSelector. null or empty namespaces + list and null namespaceSelector means "this + pod's namespace". 
+ items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the + pods matching the labelSelector in the specified + namespaces, where co-located is defined as + running on a node whose value of the label + with key topologyKey matches that of any node + on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the anti-affinity expressions + specified by this field, but it may choose a node + that violates one or more of the expressions. The + node that is most preferred is the one with the + greatest sum of weights, i.e. for each node that + meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity + expressions, etc.), compute a sum by iterating through + the elements of this field and adding "weight" to + the sum if the node has pods which matches the corresponding + podAffinityTerm; the node(s) with the highest sum + are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of + resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of + {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set + of namespaces that the term applies to. + The term is applied to the union of the + namespaces selected by this field and + the ones listed in the namespaces field. + null selector and null or empty namespaces + list means "this pod's namespace". An + empty selector ({}) matches all namespaces. 
+ properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of + {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static + list of namespace names that the term + applies to. The term is applied to the + union of the namespaces listed in this + field and the ones selected by namespaceSelector. + null or empty namespaces list and null + namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose + value of the label with key topologyKey + matches that of any node on which any + of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching + the corresponding podAffinityTerm, in the + range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified + by this field are not met at scheduling time, the + pod will not be scheduled onto the node. If the + anti-affinity requirements specified by this field + cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may + or may not try to eventually evict the pod from + its node. When there are multiple elements, the + lists of nodes corresponding to each podAffinityTerm + are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those + matching the labelSelector relative to the given + namespace(s)) that this pod should be co-located + (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node + whose value of the label with key + matches that of any node on which a pod of the + set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. 
The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by + this field and the ones listed in the namespaces + field. null selector and null or empty namespaces + list means "this pod's namespace". An empty + selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. + The term is applied to the union of the namespaces + listed in this field and the ones selected + by namespaceSelector. null or empty namespaces + list and null namespaceSelector means "this + pod's namespace". 
+ items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the + pods matching the labelSelector in the specified + namespaces, where co-located is defined as + running on a node whose value of the label + with key topologyKey matches that of any node + on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether + a service account token should be automatically mounted. + type: boolean + containers: + description: List of containers belonging to the pod. Containers + cannot currently be added or removed. There must be at least + one container in a Pod. Cannot be updated. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container + image''s CMD is used if this is not provided. Variable + references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the + reference in the input string will be unchanged. Double + $$ are reduced to a single $, which allows for escaping + the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce + the string literal "$(VAR_NAME)". Escaped references + will never be expanded, regardless of whether the + variable exists or not. Cannot be updated. More info: + https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within + a shell. The container image''s ENTRYPOINT is used + if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If + a variable cannot be resolved, the reference in the + input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) + syntax: i.e. "$$(VAR_NAME)" will produce the string + literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists + or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in + the container. Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) + are expanded using the previously defined environment + variables in the container and any service environment + variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. + Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" + will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults + to "".' + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. 
+ properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: + supports metadata.name, metadata.namespace, + `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, + status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, + requests.cpu, requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment + variables in the container. The keys defined within + a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is + starting. When a key exists in multiple sources, the + value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will + take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. 
Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config + management to default or override container images + in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, + IfNotPresent. Defaults to Always if :latest tag is + specified, or IfNotPresent otherwise. Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should + take in response to container lifecycle events. Cannot + be updated. + properties: + postStart: + description: 'PostStart is called immediately after + a container is created. If the handler fails, + the container is terminated and restarted according + to its restart policy. Other management of the + container blocks until the hook completes. More + info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line + to execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. The command + is simply exec'd, it is not run inside + a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, + you need to explicitly call out to that + shell. Exit status of 0 is treated as + live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward + compatibility. There are no validation of + this field and lifecycle hooks will fail in + runtime when tcp handler is specified. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before + a container is terminated due to an API request + or management event such as liveness/startup probe + failure, preemption, resource contention, etc. + The handler is not called if the container crashes + or exits. The Pod''s termination grace period + countdown begins before the PreStop hook is executed. + Regardless of the outcome of the handler, the + container will eventually terminate within the + Pod''s termination grace period (unless delayed + by finalizers). Other management of the container + blocks until the hook completes or until the termination + grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line + to execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. The command + is simply exec'd, it is not run inside + a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, + you need to explicitly call out to that + shell. Exit status of 0 is treated as + live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward + compatibility. There are no validation of + this field and lifecycle hooks will fail in + runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. 
+ Container will be restarted if the probe fails. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') in + the container's filesystem. The command is + simply exec'd, it is not run inside a shell, + so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is + treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the + probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is a beta field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service + to place in the gRPC HealthCheckRequest (see + https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to + the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the + probe. Default to 10 seconds. Minimum value is + 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the + probe to be considered successful after having + failed. Defaults to 1. Must be 1 for liveness + and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod + needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after + the processes running in the pod are sent a termination + signal and the time when the processes are forcibly + halted with a kill signal. Set this value longer + than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity + to shut down). This is a beta field and requires + enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. + Not specifying a port here DOES NOT prevent that port + from being exposed. Any port which is listening on + the default "0.0.0.0" address inside a container will + be accessible from the network. Modifying this array + with strategic merge patch may corrupt the data. For + more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's + IP address. This must be a valid port number, + 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: Number of port to expose on the host. + If specified, this must be a valid port number, + 0 < x < 65536. If HostNetwork is specified, + this must match ContainerPort. Most containers + do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in + a pod must have a unique name. Name for the + port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, + or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. + Container will be removed from service endpoints if + the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') in + the container's filesystem. The command is + simply exec'd, it is not run inside a shell, + so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is + treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the + probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is a beta field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service + to place in the gRPC HealthCheckRequest (see + https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to + the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the + probe. Default to 10 seconds. Minimum value is + 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the + probe to be considered successful after having + failed. Defaults to 1. Must be 1 for liveness + and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod + needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after + the processes running in the pod are sent a termination + signal and the time when the processes are forcibly + halted with a kill signal. Set this value longer + than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity + to shut down). This is a beta field and requires + enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. + Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount + of compute resources required. If Requests is + omitted for a container, it defaults to Limits + if that is explicitly specified, otherwise to + an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'SecurityContext defines the security options + the container should be run with. If set, the fields + of SecurityContext override the equivalent fields + of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls + whether a process can gain more privileges than + its parent process. This bool directly controls + if the no_new_privs flag will be set on the container + process. AllowPrivilegeEscalation is true always + when the container is: 1) run as Privileged 2) + has CAP_SYS_ADMIN Note that this field cannot + be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running + containers. Defaults to the default set of capabilities + granted by the container runtime. Note that this + field cannot be set when spec.os.name is windows. 
+ properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent + to root on the host. Defaults to false. Note that + this field cannot be set when spec.os.name is + windows. + type: boolean + procMount: + description: procMount denotes the type of proc + mount to use for the containers. The default is + DefaultProcMount which uses the container runtime + defaults for readonly paths and masked paths. + This requires the ProcMountType feature flag to + be enabled. Note that this field cannot be set + when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only + root filesystem. Default is false. Note that this + field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the + container process. Uses runtime default if unset. + May also be set in PodSecurityContext. If set + in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name + is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run + as a non-root user. If true, the Kubelet will + validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no + such validation will be performed. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in + SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the + container process. Defaults to user specified + in image metadata if unspecified. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in + SecurityContext takes precedence. Note that this + field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to + the container. If unspecified, the container runtime + will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name + is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this + container. If seccomp options are provided at + both the pod & container level, the container + options override the pod options. Note that this + field cannot be set when spec.os.name is windows. 
+ properties: + localhostProfile: + description: localhostProfile indicates a profile + defined in a file on the node should be used. + The profile must be preconfigured on the node + to work. Must be a descending path, relative + to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp + profile will be applied. Valid options are: + \n Localhost - a profile defined in a file + on the node should be used. RuntimeDefault + - the container runtime default profile should + be used. Unconfined - no profile should be + applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied + to all containers. If unspecified, the options + from the PodSecurityContext will be used. If set + in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name + is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the + GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential + spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container + should be run as a 'Host Process' container. + This field is alpha-level and will only be + honored by components that enable the WindowsHostProcessContainers + feature flag. Setting this field without the + feature flag will result in errors when validating + the Pod. All of a Pod's containers must have + the same effective HostProcess value (it is + not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, + if HostProcess is true then HostNetwork must + also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run + the entrypoint of the container process. Defaults + to the user specified in image metadata if + unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has + successfully initialized. If specified, no other probes + are executed until this completes successfully. If + this probe fails, the Pod will be restarted, just + as if the livenessProbe failed. This can be used to + provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time + to load data or warm a cache, than during steady-state + operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') in + the container's filesystem. The command is + simply exec'd, it is not run inside a shell, + so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is + treated as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the + probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is a beta field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service + to place in the gRPC HealthCheckRequest (see + https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to + the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the + probe. Default to 10 seconds. Minimum value is + 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the + probe to be considered successful after having + failed. Defaults to 1. Must be 1 for liveness + and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod + needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after + the processes running in the pod are sent a termination + signal and the time when the processes are forcibly + halted with a kill signal. Set this value longer + than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds + will be used. 
Otherwise, this value overrides
+ the value provided by the pod spec. Value must
+ be non-negative integer. The value zero indicates
+ stop immediately via the kill signal (no opportunity
+ to shut down). This is a beta field and requires
+ enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds
+ is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: 'Number of seconds after which the
+ probe times out. Defaults to 1 second. Minimum
+ value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ description: Whether this container should allocate
+ a buffer for stdin in the container runtime. If this
+ is not set, reads from stdin in the container will
+ always result in EOF. Default is false.
+ type: boolean
+ stdinOnce:
+ description: Whether the container runtime should close
+ the stdin channel after it has been opened by a single
+ attach. When stdin is true the stdin stream will remain
+ open across multiple attach sessions. If stdinOnce
+ is set to true, stdin is opened on container start,
+ is empty until the first client attaches to stdin,
+ and then remains open and accepts data until the client
+ disconnects, at which time stdin is closed and remains
+ closed until the container is restarted. If this flag
+ is false, a container process that reads from stdin
+ will never receive an EOF. Default is false.
+ type: boolean
+ terminationMessagePath:
+ description: 'Optional: Path at which the file to which
+ the container''s termination message will be written
+ is mounted into the container''s filesystem. Message
+ written is intended to be brief final status, such
+ as an assertion failure message. Will be truncated
+ by the node if greater than 4096 bytes. The total
+ message length across all containers will be limited
+ to 12kb. Defaults to /dev/termination-log. Cannot
+ be updated.'
+ type: string
+ terminationMessagePolicy:
+ description: Indicate how the termination message should
+ be populated. File will use the contents of terminationMessagePath
+ to populate the container status message on both success
+ and failure. FallbackToLogsOnError will use the last
+ chunk of container log output if the termination message
+ file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines,
+ whichever is smaller. Defaults to File. Cannot be
+ updated.
+ type: string
+ tty:
+ description: Whether this container should allocate
+ a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices
+ to be used by the container.
+ items:
+ description: volumeDevice describes a mapping of a
+ raw block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of
+ the container that the device will be mapped
+ to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim
+ in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ description: Pod volumes to mount into the container's
+ filesystem. Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting of a
+ Volume within a container.
+ properties:
+ mountPath:
+ description: Path within the container at which
+ the volume should be mounted. Must not contain
+ ':'.
+ type: string + mountPropagation: + description: mountPropagation determines how mounts + are propagated from the host to container and + the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write + otherwise (false or unspecified). Defaults to + false. + type: boolean + subPath: + description: Path within the volume from which + the container's volume should be mounted. Defaults + to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from + which the container's volume should be mounted. + Behaves similarly to SubPath but environment + variable references $(VAR_NAME) are expanded + using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath + are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which + might be configured in the container image. Cannot + be updated. + type: string + required: + - name + type: object + type: array + dnsConfig: + description: Specifies the DNS parameters of a pod. Parameters + specified here will be merged to the generated DNS configuration + based on DNSPolicy. + properties: + nameservers: + description: A list of DNS name server IP addresses. This + will be appended to the base nameservers generated from + DNSPolicy. Duplicated nameservers will be removed. + items: + type: string + type: array + options: + description: A list of DNS resolver options. This will + be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options + given in Options will override those that appear in + the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: Required. + type: string + value: + type: string + type: object + type: array + searches: + description: A list of DNS search domains for host-name + lookup. This will be appended to the base search paths + generated from DNSPolicy. Duplicated search paths will + be removed. + items: + type: string + type: array + type: object + dnsPolicy: + description: Set DNS policy for the pod. Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', + 'Default' or 'None'. DNS parameters given in DNSConfig will + be merged with the policy selected with DNSPolicy. To have + DNS options set along with hostNetwork, you have to specify + DNS policy explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: 'EnableServiceLinks indicates whether information + about services should be injected into pod''s environment + variables, matching the syntax of Docker links. Optional: + Defaults to true.' + type: boolean + ephemeralContainers: + description: List of ephemeral containers run in this pod. + Ephemeral containers may be run in an existing pod to perform + user-initiated actions such as debugging. This list cannot + be specified when creating a pod, and it cannot be modified + by updating the pod spec. In order to add an ephemeral container + to an existing pod, use the pod's ephemeralcontainers subresource. 
+ items: + description: "An EphemeralContainer is a temporary container + that you may add to an existing Pod for user-initiated + activities such as debugging. Ephemeral containers have + no resource or scheduling guarantees, and they will not + be restarted when they exit or when a Pod is removed or + restarted. The kubelet may evict a Pod if an ephemeral + container causes the Pod to exceed its resource allocation. + \n To add an ephemeral container, use the ephemeralcontainers + subresource of an existing Pod. Ephemeral containers may + not be removed or restarted." + properties: + args: + description: 'Arguments to the entrypoint. The image''s + CMD is used if this is not provided. Variable references + $(VAR_NAME) are expanded using the container''s environment. + If a variable cannot be resolved, the reference in + the input string will be unchanged. Double $$ are + reduced to a single $, which allows for escaping the + $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce + the string literal "$(VAR_NAME)". Escaped references + will never be expanded, regardless of whether the + variable exists or not. Cannot be updated. More info: + https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within + a shell. The image''s ENTRYPOINT is used if this is + not provided. Variable references $(VAR_NAME) are + expanded using the container''s environment. If a + variable cannot be resolved, the reference in the + input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) + syntax: i.e. "$$(VAR_NAME)" will produce the string + literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists + or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in + the container. Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) + are expanded using the previously defined environment + variables in the container and any service environment + variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. + Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" + will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults + to "".' + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: + supports metadata.name, metadata.namespace, + `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, + status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, + requests.cpu, requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment + variables in the container. The keys defined within + a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is + starting. When a key exists in multiple sources, the + value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will + take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. 
apiVersion,
+ kind, uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret must
+ be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ type: array
+ image:
+ description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images'
+ type: string
+ imagePullPolicy:
+ description: 'Image pull policy. One of Always, Never,
+ IfNotPresent. Defaults to Always if :latest tag is
+ specified, or IfNotPresent otherwise. Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/containers/images#updating-images'
+ type: string
+ lifecycle:
+ description: Lifecycle is not allowed for ephemeral
+ containers.
+ properties:
+ postStart:
+ description: 'PostStart is called immediately after
+ a container is created. If the handler fails,
+ the container is terminated and restarted according
+ to its restart policy. Other management of the
+ container blocks until the hook completes. More
+ info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks'
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: Command is the command line
+ to execute inside the container, the working
+ directory for the command is root ('/')
+ in the container's filesystem. The command
+ is simply exec'd, it is not run inside
+ a shell, so traditional shell instructions
+ ('|', etc) won't work. To use a shell,
+ you need to explicitly call out to that
+ shell. Exit status of 0 is treated as
+ live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: Host name to connect to, defaults
+ to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the
+ request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: The header field name
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Name or number of the port
+ to access on the container. Number must
+ be in the range 1 to 65535. Name must
+ be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: Scheme to use for connecting
+ to the host. Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ description: Deprecated. TCPSocket is NOT supported
+ as a LifecycleHandler and kept for backward
+ compatibility. There is no validation of
+ this field and lifecycle hooks will fail at
+ runtime when a tcp handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Number or name of the port
+ to access on the container. Number must
+ be in the range 1 to 65535. Name must
+ be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ description: 'PreStop is called immediately before
+ a container is terminated due to an API request
+ or management event such as liveness/startup probe
+ failure, preemption, resource contention, etc.
+ The handler is not called if the container crashes
+ or exits. The Pod''s termination grace period
+ countdown begins before the PreStop hook is executed.
+ Regardless of the outcome of the handler, the
+ container will eventually terminate within the
+ Pod''s termination grace period (unless delayed
+ by finalizers). Other management of the container
+ blocks until the hook completes or until the termination
+ grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks'
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: Command is the command line
+ to execute inside the container, the working
+ directory for the command is root ('/')
+ in the container's filesystem. The command
+ is simply exec'd, it is not run inside
+ a shell, so traditional shell instructions
+ ('|', etc) won't work. To use a shell,
+ you need to explicitly call out to that
+ shell. Exit status of 0 is treated as
+ live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: Host name to connect to, defaults
+ to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the
+ request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: The header field name
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Name or number of the port
+ to access on the container. Number must
+ be in the range 1 to 65535. Name must
+ be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: Scheme to use for connecting
+ to the host. Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ description: Deprecated. TCPSocket is NOT supported
+ as a LifecycleHandler and kept for backward
+ compatibility. There is no validation of
+ this field and lifecycle hooks will fail at
+ runtime when a tcp handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Number or name of the port
+ to access on the container. Number must
+ be in the range 1 to 65535. Name must
+ be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ description: Probes are not allowed for ephemeral containers.
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: Command is the command line to
+ execute inside the container, the working
+ directory for the command is root ('/') in
+ the container's filesystem.
The command is + simply exec'd, it is not run inside a shell, + so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is + treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the + probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is a beta field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service + to place in the gRPC HealthCheckRequest (see + https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to + the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the + probe. Default to 10 seconds. Minimum value is + 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the + probe to be considered successful after having + failed. Defaults to 1. Must be 1 for liveness + and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod + needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after + the processes running in the pod are sent a termination + signal and the time when the processes are forcibly + halted with a kill signal. Set this value longer + than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity + to shut down). This is a beta field and requires + enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the ephemeral container specified + as a DNS_LABEL. This name must be unique among all + containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's + IP address. This must be a valid port number, + 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: Number of port to expose on the host. + If specified, this must be a valid port number, + 0 < x < 65536. If HostNetwork is specified, + this must match ContainerPort. Most containers + do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in + a pod must have a unique name. Name for the + port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, + or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') in + the container's filesystem. The command is + simply exec'd, it is not run inside a shell, + so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is + treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the + probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is a beta field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. 
+ format: int32 + type: integer + service: + description: "Service is the name of the service + to place in the gRPC HealthCheckRequest (see + https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to + the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the + probe. Default to 10 seconds. Minimum value is + 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the + probe to be considered successful after having + failed. Defaults to 1. Must be 1 for liveness + and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod + needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after + the processes running in the pod are sent a termination + signal and the time when the processes are forcibly + halted with a kill signal. Set this value longer + than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity + to shut down). This is a beta field and requires + enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: Resources are not allowed for ephemeral + containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount + of compute resources required. If Requests is + omitted for a container, it defaults to Limits + if that is explicitly specified, otherwise to + an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'Optional: SecurityContext defines the + security options the ephemeral container should be + run with. If set, the fields of SecurityContext override + the equivalent fields of PodSecurityContext.' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls + whether a process can gain more privileges than + its parent process. This bool directly controls + if the no_new_privs flag will be set on the container + process. AllowPrivilegeEscalation is true always + when the container is: 1) run as Privileged 2) + has CAP_SYS_ADMIN Note that this field cannot + be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running + containers. Defaults to the default set of capabilities + granted by the container runtime. Note that this + field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent + to root on the host. Defaults to false. Note that + this field cannot be set when spec.os.name is + windows. + type: boolean + procMount: + description: procMount denotes the type of proc + mount to use for the containers. The default is + DefaultProcMount which uses the container runtime + defaults for readonly paths and masked paths. + This requires the ProcMountType feature flag to + be enabled. Note that this field cannot be set + when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only + root filesystem. Default is false. Note that this + field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the + container process. Uses runtime default if unset. + May also be set in PodSecurityContext. 
If set + in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name + is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run + as a non-root user. If true, the Kubelet will + validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no + such validation will be performed. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in + SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the + container process. Defaults to user specified + in image metadata if unspecified. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in + SecurityContext takes precedence. Note that this + field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to + the container. If unspecified, the container runtime + will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name + is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this + container. If seccomp options are provided at + both the pod & container level, the container + options override the pod options. Note that this + field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile + defined in a file on the node should be used. + The profile must be preconfigured on the node + to work. Must be a descending path, relative + to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp + profile will be applied. Valid options are: + \n Localhost - a profile defined in a file + on the node should be used. RuntimeDefault + - the container runtime default profile should + be used. Unconfined - no profile should be + applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied + to all containers. If unspecified, the options + from the PodSecurityContext will be used. If set + in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name + is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the + GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential + spec named by the GMSACredentialSpecName field. 
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container + should be run as a 'Host Process' container. + This field is alpha-level and will only be + honored by components that enable the WindowsHostProcessContainers + feature flag. Setting this field without the + feature flag will result in errors when validating + the Pod. All of a Pod's containers must have + the same effective HostProcess value (it is + not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, + if HostProcess is true then HostNetwork must + also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run + the entrypoint of the container process. Defaults + to the user specified in image metadata if + unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') in + the container's filesystem. The command is + simply exec'd, it is not run inside a shell, + so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is + treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the + probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is a beta field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service + to place in the gRPC HealthCheckRequest (see + https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: Scheme to use for connecting to
+ the host. Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: 'Number of seconds after the container
+ has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ periodSeconds:
+ description: How often (in seconds) to perform the
+ probe. Default to 10 seconds. Minimum value is
+ 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: Minimum consecutive successes for the
+ probe to be considered successful after having
+ failed. Defaults to 1. Must be 1 for liveness
+ and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Number or name of the port to access
+ on the container. Number must be in the range
+ 1 to 65535. Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: Optional duration in seconds the pod
+ needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after
+ the processes running in the pod are sent a termination
+ signal and the time when the processes are forcibly
+ halted with a kill signal. Set this value longer
+ than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds
+ will be used. Otherwise, this value overrides
+ the value provided by the pod spec. Value must
+ be non-negative integer. The value zero indicates
+ stop immediately via the kill signal (no opportunity
+ to shut down). This is a beta field and requires
+ enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds
+ is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: 'Number of seconds after which the
+ probe times out. Defaults to 1 second. Minimum
+ value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ description: Whether this container should allocate
+ a buffer for stdin in the container runtime. If this
+ is not set, reads from stdin in the container will
+ always result in EOF. Default is false.
+ type: boolean
+ stdinOnce:
+ description: Whether the container runtime should close
+ the stdin channel after it has been opened by a single
+ attach. When stdin is true the stdin stream will remain
+ open across multiple attach sessions. If stdinOnce
+ is set to true, stdin is opened on container start,
+ is empty until the first client attaches to stdin,
+ and then remains open and accepts data until the client
+ disconnects, at which time stdin is closed and remains
+ closed until the container is restarted. If this flag
+ is false, a container process that reads from stdin
+ will never receive an EOF. Default is false.
+ type: boolean
+ targetContainerName:
+ description: "If set, the name of the container from
+ PodSpec that this ephemeral container targets. The
+ ephemeral container will be run in the namespaces
+ (IPC, PID, etc) of this container.
If not set then + the ephemeral container uses the namespaces configured + in the Pod spec. \n The container runtime must implement + support for this feature. If the runtime does not + support namespace targeting then the result of setting + this field is undefined." + type: string + terminationMessagePath: + description: 'Optional: Path at which the file to which + the container''s termination message will be written + is mounted into the container''s filesystem. Message + written is intended to be brief final status, such + as an assertion failure message. Will be truncated + by the node if greater than 4096 bytes. The total + message length across all containers will be limited + to 12kb. Defaults to /dev/termination-log. Cannot + be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should + be populated. File will use the contents of terminationMessagePath + to populate the container status message on both success + and failure. FallbackToLogsOnError will use the last + chunk of container log output if the termination message + file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, + whichever is smaller. Defaults to File. Cannot be + updated. + type: string + tty: + description: Whether this container should allocate + a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's + filesystem. Subpath mounts are not allowed for ephemeral + containers. Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: Path within the container at which + the volume should be mounted. Must not contain + ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts + are propagated from the host to container and + the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write + otherwise (false or unspecified). Defaults to + false. + type: boolean + subPath: + description: Path within the volume from which + the container's volume should be mounted. Defaults + to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from + which the container's volume should be mounted. + Behaves similarly to SubPath but environment + variable references $(VAR_NAME) are expanded + using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath + are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. 
If not specified, + the container runtime's default will be used, which + might be configured in the container image. Cannot + be updated. + type: string + required: + - name + type: object + type: array + hostAliases: + description: HostAliases is an optional list of hosts and + IPs that will be injected into the pod's hosts file if specified. + This is only valid for non-hostNetwork pods. + items: + description: HostAlias holds the mapping between IP and + hostnames that will be injected as an entry in the pod's + hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostIPC: + description: 'Use the host''s ipc namespace. Optional: Default + to false.' + type: boolean + hostNetwork: + description: Host networking requested for this pod. Use the + host's network namespace. If this option is set, the ports + that will be used must be specified. Default to false. + type: boolean + hostPID: + description: 'Use the host''s pid namespace. Optional: Default + to false.' + type: boolean + hostUsers: + description: 'Use the host''s user namespace. Optional: Default + to true. If set to true or not present, the pod will be + run in the host user namespace, useful for when the pod + needs a feature only available to the host user namespace, + such as loading a kernel module with CAP_SYS_MODULE. When + set to false, a new userns is created for the pod. Setting + false is useful for mitigating container breakout vulnerabilities + even allowing users to run their containers as root without + actually having root privileges on the host. This field + is alpha-level and is only honored by servers that enable + the UserNamespacesSupport feature.' + type: boolean + hostname: + description: Specifies the hostname of the Pod If not specified, + the pod's hostname will be set to a system-defined value. + type: string + imagePullSecrets: + description: 'ImagePullSecrets is an optional list of references + to secrets in the same namespace to use for pulling any + of the images used by this PodSpec. If specified, these + secrets will be passed to individual puller implementations + for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod' + items: + description: LocalObjectReference contains enough information + to let you locate the referenced object inside the same + namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + type: array + initContainers: + description: 'List of initialization containers belonging + to the pod. Init containers are executed in order prior + to containers being started. If any init container fails, + the pod is considered to have failed and is handled according + to its restartPolicy. The name for an init container or + normal container must be unique among all containers. Init + containers may not have Lifecycle actions, Readiness probes, + Liveness probes, or Startup probes. The resourceRequirements + of an init container are taken into account during scheduling + by finding the highest request/limit for each resource type, + and then using the max of of that value or the sum of the + normal containers. 
Limits are applied to init containers + in a similar fashion. Init containers cannot currently be + added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/' + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container + image''s CMD is used if this is not provided. Variable + references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the + reference in the input string will be unchanged. Double + $$ are reduced to a single $, which allows for escaping + the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce + the string literal "$(VAR_NAME)". Escaped references + will never be expanded, regardless of whether the + variable exists or not. Cannot be updated. More info: + https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within + a shell. The container image''s ENTRYPOINT is used + if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If + a variable cannot be resolved, the reference in the + input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) + syntax: i.e. "$$(VAR_NAME)" will produce the string + literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists + or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in + the container. Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) + are expanded using the previously defined environment + variables in the container and any service environment + variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. + Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" + will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults + to "".' + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' 
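+                                              # Illustrative sketch (hypothetical names, not from this
+                                              # repo): an env entry of the shape this schema validates.
+                                              #   env:
+                                              #   - name: EXAMPLE_MODE
+                                              #     valueFrom:
+                                              #       configMapKeyRef:
+                                              #         name: example-config
+                                              #         key: mode
+                                              #         optional: true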
+                                              type: string
+                                            optional:
+                                              description: Specify whether the ConfigMap
+                                                or its key must be defined
+                                              type: boolean
+                                          required:
+                                          - key
+                                          type: object
+                                          x-kubernetes-map-type: atomic
+                                        fieldRef:
+                                          description: 'Selects a field of the pod:
+                                            supports metadata.name, metadata.namespace,
+                                            `metadata.labels[''<KEY>'']`, `metadata.annotations[''<KEY>'']`,
+                                            spec.nodeName, spec.serviceAccountName,
+                                            status.hostIP, status.podIP, status.podIPs.'
+                                          properties:
+                                            apiVersion:
+                                              description: Version of the schema the
+                                                FieldPath is written in terms of, defaults
+                                                to "v1".
+                                              type: string
+                                            fieldPath:
+                                              description: Path of the field to select
+                                                in the specified API version.
+                                              type: string
+                                          required:
+                                          - fieldPath
+                                          type: object
+                                          x-kubernetes-map-type: atomic
+                                        resourceFieldRef:
+                                          description: 'Selects a resource of the container:
+                                            only resources limits and requests (limits.cpu,
+                                            limits.memory, limits.ephemeral-storage,
+                                            requests.cpu, requests.memory and requests.ephemeral-storage)
+                                            are currently supported.'
+                                          properties:
+                                            containerName:
+                                              description: 'Container name: required
+                                                for volumes, optional for env vars'
+                                              type: string
+                                            divisor:
+                                              anyOf:
+                                              - type: integer
+                                              - type: string
+                                              description: Specifies the output format
+                                                of the exposed resources, defaults to
+                                                "1"
+                                              pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                                              x-kubernetes-int-or-string: true
+                                            resource:
+                                              description: 'Required: resource to select'
+                                              type: string
+                                          required:
+                                          - resource
+                                          type: object
+                                          x-kubernetes-map-type: atomic
+                                        secretKeyRef:
+                                          description: Selects a key of a secret in
+                                            the pod's namespace
+                                          properties:
+                                            key:
+                                              description: The key of the secret to
+                                                select from. Must be a valid secret
+                                                key.
+                                              type: string
+                                            name:
+                                              description: 'Name of the referent. More
+                                                info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                                TODO: Add other useful fields. apiVersion,
+                                                kind, uid?'
+                                              type: string
+                                            optional:
+                                              description: Specify whether the Secret
+                                                or its key must be defined
+                                              type: boolean
+                                          required:
+                                          - key
+                                          type: object
+                                          x-kubernetes-map-type: atomic
+                                      type: object
+                                  required:
+                                  - name
+                                  type: object
+                              type: array
+                            envFrom:
+                              description: List of sources to populate environment
+                                variables in the container. The keys defined within
+                                a source must be a C_IDENTIFIER. All invalid keys
+                                will be reported as an event when the container is
+                                starting. When a key exists in multiple sources, the
+                                value associated with the last source will take precedence.
+                                Values defined by an Env with a duplicate key will
+                                take precedence. Cannot be updated.
+                              items:
+                                description: EnvFromSource represents the source of
+                                  a set of ConfigMaps
+                                properties:
+                                  configMapRef:
+                                    description: The ConfigMap to select from
+                                    properties:
+                                      name:
+                                        description: 'Name of the referent. More info:
+                                          https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                          TODO: Add other useful fields. apiVersion,
+                                          kind, uid?'
+                                        type: string
+                                      optional:
+                                        description: Specify whether the ConfigMap
+                                          must be defined
+                                        type: boolean
+                                    type: object
+                                    x-kubernetes-map-type: atomic
+                                  prefix:
+                                    description: An optional identifier to prepend
+                                      to each key in the ConfigMap. Must be a C_IDENTIFIER.
+                                    type: string
+                                  secretRef:
+                                    description: The Secret to select from
+                                    properties:
+                                      name:
+                                        description: 'Name of the referent. More info:
+                                          https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                          TODO: Add other useful fields.
apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config + management to default or override container images + in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, + IfNotPresent. Defaults to Always if :latest tag is + specified, or IfNotPresent otherwise. Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should + take in response to container lifecycle events. Cannot + be updated. + properties: + postStart: + description: 'PostStart is called immediately after + a container is created. If the handler fails, + the container is terminated and restarted according + to its restart policy. Other management of the + container blocks until the hook completes. More + info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line + to execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. The command + is simply exec'd, it is not run inside + a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, + you need to explicitly call out to that + shell. Exit status of 0 is treated as + live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward + compatibility. There are no validation of + this field and lifecycle hooks will fail in + runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. 
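+                                            # Illustrative sketch: a lifecycle stanza of the shape
+                                            # described above; command and endpoint are hypothetical.
+                                            #   lifecycle:
+                                            #     postStart:
+                                            #       exec:
+                                            #         command: ["/bin/sh", "-c", "echo started"]
+                                            #     preStop:
+                                            #       httpGet:
+                                            #         path: /shutdown
+                                            #         port: 8080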
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before + a container is terminated due to an API request + or management event such as liveness/startup probe + failure, preemption, resource contention, etc. + The handler is not called if the container crashes + or exits. The Pod''s termination grace period + countdown begins before the PreStop hook is executed. + Regardless of the outcome of the handler, the + container will eventually terminate within the + Pod''s termination grace period (unless delayed + by finalizers). Other management of the container + blocks until the hook completes or until the termination + grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line + to execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. The command + is simply exec'd, it is not run inside + a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, + you need to explicitly call out to that + shell. Exit status of 0 is treated as + live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward + compatibility. There are no validation of + this field and lifecycle hooks will fail in + runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. + Container will be restarted if the probe fails. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') in + the container's filesystem. The command is + simply exec'd, it is not run inside a shell, + so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is + treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the + probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is a beta field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service + to place in the gRPC HealthCheckRequest (see + https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to + the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the + probe. Default to 10 seconds. Minimum value is + 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the + probe to be considered successful after having + failed. Defaults to 1. Must be 1 for liveness + and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. 
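+                                        # Illustrative sketch: a livenessProbe matching this schema;
+                                        # the endpoint and timings are hypothetical.
+                                        #   livenessProbe:
+                                        #     httpGet:
+                                        #       path: /healthz
+                                        #       port: 8080
+                                        #     initialDelaySeconds: 5
+                                        #     periodSeconds: 10
+                                        #     failureThreshold: 3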
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod + needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after + the processes running in the pod are sent a termination + signal and the time when the processes are forcibly + halted with a kill signal. Set this value longer + than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity + to shut down). This is a beta field and requires + enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. + Not specifying a port here DOES NOT prevent that port + from being exposed. Any port which is listening on + the default "0.0.0.0" address inside a container will + be accessible from the network. Modifying this array + with strategic merge patch may corrupt the data. For + more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's + IP address. This must be a valid port number, + 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: Number of port to expose on the host. + If specified, this must be a valid port number, + 0 < x < 65536. If HostNetwork is specified, + this must match ContainerPort. Most containers + do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in + a pod must have a unique name. Name for the + port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, + or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. + Container will be removed from service endpoints if + the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') in + the container's filesystem. 
The command is + simply exec'd, it is not run inside a shell, + so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is + treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the + probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is a beta field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service + to place in the gRPC HealthCheckRequest (see + https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to + the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the + probe. Default to 10 seconds. Minimum value is + 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the + probe to be considered successful after having + failed. Defaults to 1. Must be 1 for liveness + and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod + needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after + the processes running in the pod are sent a termination + signal and the time when the processes are forcibly + halted with a kill signal. Set this value longer + than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity + to shut down). This is a beta field and requires + enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. + Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount + of compute resources required. If Requests is + omitted for a container, it defaults to Limits + if that is explicitly specified, otherwise to + an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'SecurityContext defines the security options + the container should be run with. If set, the fields + of SecurityContext override the equivalent fields + of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls + whether a process can gain more privileges than + its parent process. This bool directly controls + if the no_new_privs flag will be set on the container + process. AllowPrivilegeEscalation is true always + when the container is: 1) run as Privileged 2) + has CAP_SYS_ADMIN Note that this field cannot + be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running + containers. Defaults to the default set of capabilities + granted by the container runtime. Note that this + field cannot be set when spec.os.name is windows. 
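+                                    # Illustrative sketch: a hardened container securityContext
+                                    # of the shape described here; capability choices are hypothetical.
+                                    #   securityContext:
+                                    #     allowPrivilegeEscalation: false
+                                    #     readOnlyRootFilesystem: true
+                                    #     capabilities:
+                                    #       drop: ["ALL"]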
+ properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent + to root on the host. Defaults to false. Note that + this field cannot be set when spec.os.name is + windows. + type: boolean + procMount: + description: procMount denotes the type of proc + mount to use for the containers. The default is + DefaultProcMount which uses the container runtime + defaults for readonly paths and masked paths. + This requires the ProcMountType feature flag to + be enabled. Note that this field cannot be set + when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only + root filesystem. Default is false. Note that this + field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the + container process. Uses runtime default if unset. + May also be set in PodSecurityContext. If set + in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name + is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run + as a non-root user. If true, the Kubelet will + validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no + such validation will be performed. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in + SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the + container process. Defaults to user specified + in image metadata if unspecified. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in + SecurityContext takes precedence. Note that this + field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to + the container. If unspecified, the container runtime + will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name + is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this + container. If seccomp options are provided at + both the pod & container level, the container + options override the pod options. Note that this + field cannot be set when spec.os.name is windows. 
+ properties: + localhostProfile: + description: localhostProfile indicates a profile + defined in a file on the node should be used. + The profile must be preconfigured on the node + to work. Must be a descending path, relative + to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp + profile will be applied. Valid options are: + \n Localhost - a profile defined in a file + on the node should be used. RuntimeDefault + - the container runtime default profile should + be used. Unconfined - no profile should be + applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied + to all containers. If unspecified, the options + from the PodSecurityContext will be used. If set + in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name + is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the + GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential + spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container + should be run as a 'Host Process' container. + This field is alpha-level and will only be + honored by components that enable the WindowsHostProcessContainers + feature flag. Setting this field without the + feature flag will result in errors when validating + the Pod. All of a Pod's containers must have + the same effective HostProcess value (it is + not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, + if HostProcess is true then HostNetwork must + also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run + the entrypoint of the container process. Defaults + to the user specified in image metadata if + unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has + successfully initialized. If specified, no other probes + are executed until this completes successfully. If + this probe fails, the Pod will be restarted, just + as if the livenessProbe failed. This can be used to + provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time + to load data or warm a cache, than during steady-state + operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') in + the container's filesystem. The command is + simply exec'd, it is not run inside a shell, + so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is + treated as live/healthy and non-zero is unhealthy. 
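+                                        # Illustrative sketch: a startupProbe per this schema; it
+                                        # allows up to 30 checks, 10s apart, before liveness takes
+                                        # over; the command is hypothetical.
+                                        #   startupProbe:
+                                        #     exec:
+                                        #       command: ["cat", "/tmp/ready"]
+                                        #     failureThreshold: 30
+                                        #     periodSeconds: 10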
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the + probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is a beta field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service + to place in the gRPC HealthCheckRequest (see + https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to + the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the + probe. Default to 10 seconds. Minimum value is + 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the + probe to be considered successful after having + failed. Defaults to 1. Must be 1 for liveness + and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod + needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after + the processes running in the pod are sent a termination + signal and the time when the processes are forcibly + halted with a kill signal. Set this value longer + than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds + will be used. 
Otherwise, this value overrides + the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity + to shut down). This is a beta field and requires + enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate + a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will + always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close + the stdin channel after it has been opened by a single + attach. When stdin is true the stdin stream will remain + open across multiple attach sessions. If stdinOnce + is set to true, stdin is opened on container start, + is empty until the first client attaches to stdin, + and then remains open and accepts data until the client + disconnects, at which time stdin is closed and remains + closed until the container is restarted. If this flag + is false, a container processes that reads from stdin + will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which + the container''s termination message will be written + is mounted into the container''s filesystem. Message + written is intended to be brief final status, such + as an assertion failure message. Will be truncated + by the node if greater than 4096 bytes. The total + message length across all containers will be limited + to 12kb. Defaults to /dev/termination-log. Cannot + be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should + be populated. File will use the contents of terminationMessagePath + to populate the container status message on both success + and failure. FallbackToLogsOnError will use the last + chunk of container log output if the termination message + file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, + whichever is smaller. Defaults to File. Cannot be + updated. + type: string + tty: + description: Whether this container should allocate + a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's + filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: Path within the container at which + the volume should be mounted. Must not contain + ':'. 
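+                                      # Illustrative sketch: a volumeMounts entry matching this
+                                      # schema; the volume name and path are hypothetical.
+                                      #   volumeMounts:
+                                      #   - name: example-data
+                                      #     mountPath: /data
+                                      #     readOnly: true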
+ type: string + mountPropagation: + description: mountPropagation determines how mounts + are propagated from the host to container and + the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write + otherwise (false or unspecified). Defaults to + false. + type: boolean + subPath: + description: Path within the volume from which + the container's volume should be mounted. Defaults + to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from + which the container's volume should be mounted. + Behaves similarly to SubPath but environment + variable references $(VAR_NAME) are expanded + using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath + are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which + might be configured in the container image. Cannot + be updated. + type: string + required: + - name + type: object + type: array + nodeName: + description: NodeName is a request to schedule this pod onto + a specific node. If it is non-empty, the scheduler simply + schedules this pod onto that node, assuming that it fits + resource requirements. + type: string + nodeSelector: + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true + for the pod to fit on a node. Selector which must match + a node''s labels for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + x-kubernetes-map-type: atomic + os: + description: "Specifies the OS of the containers in the pod. + Some pod and container fields are restricted if this is + set. \n If the OS field is set to linux, the following fields + must be unset: -securityContext.windowsOptions \n If the + OS field is set to windows, following fields must be unset: + - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.seLinuxOptions + - spec.securityContext.seccompProfile - spec.securityContext.fsGroup + - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls + - spec.shareProcessNamespace - spec.securityContext.runAsUser + - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups + - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile + - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem + - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation + - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser + - spec.containers[*].securityContext.runAsGroup" + properties: + name: + description: 'Name is the name of the operating system. + The currently supported values are linux and windows. 
+ Additional value may be defined in future and can be + one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration + Clients should expect to handle additional values and + treat unrecognized values in this field as os: null' + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Overhead represents the resource overhead associated + with running a pod for a given RuntimeClass. This field + will be autopopulated at admission time by the RuntimeClass + admission controller. If the RuntimeClass admission controller + is enabled, overhead must not be set in Pod create requests. + The RuntimeClass admission controller will reject Pod create + requests which have the overhead already set. If RuntimeClass + is configured and selected in the PodSpec, Overhead will + be set to the value defined in the corresponding RuntimeClass, + otherwise it will remain unset and treated as zero. More + info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md' + type: object + preemptionPolicy: + description: PreemptionPolicy is the Policy for preempting + pods with lower priority. One of Never, PreemptLowerPriority. + Defaults to PreemptLowerPriority if unset. + type: string + priority: + description: The priority value. Various system components + use this field to find the priority of the pod. When Priority + Admission Controller is enabled, it prevents users from + setting this field. The admission controller populates this + field from PriorityClassName. The higher the value, the + higher the priority. + format: int32 + type: integer + priorityClassName: + description: If specified, indicates the pod's priority. "system-node-critical" + and "system-cluster-critical" are two special keywords which + indicate the highest priorities with the former being the + highest priority. Any other name must be defined by creating + a PriorityClass object with that name. If not specified, + the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: 'If specified, all readiness gates will be evaluated + for pod readiness. A pod is ready when all its containers + are ready AND all conditions specified in the readiness + gates have status equal to "True" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates' + items: + description: PodReadinessGate contains the reference to + a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in + the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + restartPolicy: + description: 'Restart policy for all containers within the + pod. One of Always, OnFailure, Never. Default to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy' + type: string + runtimeClassName: + description: 'RuntimeClassName refers to a RuntimeClass object + in the node.k8s.io group, which should be used to run this + pod. If no RuntimeClass resource matches the named class, + the pod will not be run. If unset or empty, the "legacy" + RuntimeClass will be used, which is an implicit class with + an empty definition that uses the default runtime handler. 
+ More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class' + type: string + schedulerName: + description: If specified, the pod will be dispatched by specified + scheduler. If not specified, the pod will be dispatched + by default scheduler. + type: string + securityContext: + description: 'SecurityContext holds pod-level security attributes + and common container settings. Optional: Defaults to empty. See + type description for default values of each field.' + properties: + fsGroup: + description: "A special supplemental group that applies + to all containers in a pod. Some volume types allow + the Kubelet to change the ownership of that volume to + be owned by the pod: \n 1. The owning GID will be the + FSGroup 2. The setgid bit is set (new files created + in the volume will be owned by FSGroup) 3. The permission + bits are OR'd with rw-rw---- \n If unset, the Kubelet + will not modify the ownership and permissions of any + volume. Note that this field cannot be set when spec.os.name + is windows." + format: int64 + type: integer + fsGroupChangePolicy: + description: 'fsGroupChangePolicy defines behavior of + changing ownership and permission of the volume before + being exposed inside Pod. This field will only apply + to volume types which support fsGroup based ownership(and + permissions). It will have no effect on ephemeral volume + types such as: secret, configmaps and emptydir. Valid + values are "OnRootMismatch" and "Always". If not specified, + "Always" is used. Note that this field cannot be set + when spec.os.name is windows.' + type: string + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be + set in SecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. Note that this + field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as + a non-root user. If true, the Kubelet will validate + the image at runtime to ensure that it does not run + as UID 0 (root) and fail to start the container if it + does. If unset or false, no such validation will be + performed. May also be set in SecurityContext. If set + in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata + if unspecified. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence + for that container. Note that this field cannot be set + when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all + containers. If unspecified, the container runtime will + allocate a random SELinux context for each container. May + also be set in SecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. Note that this + field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. 
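+                                  # Illustrative sketch: a pod-level securityContext as described
+                                  # above; the numeric IDs are hypothetical.
+                                  #   securityContext:
+                                  #     runAsNonRoot: true
+                                  #     runAsUser: 1000
+                                  #     fsGroup: 2000
+                                  #     seccompProfile:
+                                  #       type: RuntimeDefault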
+ type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by the containers + in this pod. Note that this field cannot be set when + spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile + defined in a file on the node should be used. The + profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's + configured seccomp profile location. Must only be + set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp + profile will be applied. Valid options are: \n Localhost + - a profile defined in a file on the node should + be used. RuntimeDefault - the container runtime + default profile should be used. Unconfined - no + profile should be applied." + type: string + required: + - type + type: object + supplementalGroups: + description: A list of groups applied to the first process + run in each container, in addition to the container's + primary GID. If unspecified, no groups will be added + to any container. Note that this field cannot be set + when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: Sysctls hold a list of namespaced sysctls + used for the pod. Pods with unsupported sysctls (by + the container runtime) might fail to launch. Note that + this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: The Windows specific settings applied to + all containers. If unspecified, the options within a + container's SecurityContext will be used. If set in + both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA + admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec + named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container + should be run as a 'Host Process' container. This + field is alpha-level and will only be honored by + components that enable the WindowsHostProcessContainers + feature flag. Setting this field without the feature + flag will result in errors when validating the Pod. + All of a Pod's containers must have the same effective + HostProcess value (it is not allowed to have a mix + of HostProcess containers and non-HostProcess containers). In + addition, if HostProcess is true then HostNetwork + must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set + in PodSecurityContext. 
If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + serviceAccount: + description: 'DeprecatedServiceAccount is a deprecated alias + for ServiceAccountName. Deprecated: Use serviceAccountName + instead.' + type: string + serviceAccountName: + description: 'ServiceAccountName is the name of the ServiceAccount + to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/' + type: string + setHostnameAsFQDN: + description: If true the pod's hostname will be configured + as the pod's FQDN, rather than the leaf name (the default). + In Linux containers, this means setting the FQDN in the + hostname field of the kernel (the nodename field of struct + utsname). In Windows containers, this means setting the + registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters + to FQDN. If a pod does not have FQDN, this has no effect. + Default to false. + type: boolean + shareProcessNamespace: + description: 'Share a single process namespace between all + of the containers in a pod. When this is set containers + will be able to view and signal processes from other containers + in the same pod, and the first process in each container + will not be assigned PID 1. HostPID and ShareProcessNamespace + cannot both be set. Optional: Default to false.' + type: boolean + subdomain: + description: If specified, the fully qualified Pod hostname + will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". If not specified, the pod will not have a domainname + at all. + type: string + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to + terminate gracefully. May be decreased in delete request. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). If this value is nil, the default grace period + will be used instead. The grace period is the duration in + seconds after the processes running in the pod are sent + a termination signal and the time when the processes are + forcibly halted with a kill signal. Set this value longer + than the expected cleanup time for your process. Defaults + to 30 seconds. + format: int64 + type: integer + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple <key,value,effect> using + the matching operator <operator>. + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint.
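The tolerations block above mirrors the standard pod-spec field. As a minimal sketch only (the taint key, value, and image below are hypothetical placeholders, not part of this patch), a pod tolerating a NoExecute taint for a bounded time could look like:

```yaml
# Hypothetical pod illustrating the tolerations schema described above.
apiVersion: v1
kind: Pod
metadata:
  name: toleration-demo            # example name, not from this patch
spec:
  containers:
    - name: app
      image: registry.example.com/app:latest   # placeholder image
  tolerations:
    - key: example.com/maintenance # hypothetical taint key
      operator: Equal
      value: planned
      effect: NoExecute
      tolerationSeconds: 600       # evicted 10 minutes after the taint appears
```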
+ By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value should + be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: TopologySpreadConstraints describes how a group + of pods ought to spread across topology domains. Scheduler + will schedule pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching + pods. Pods that match this label selector are counted + to determine the number of pods in their corresponding + topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: MatchLabelKeys is a set of pod label keys + to select the pods over which spreading will be calculated. + The keys are used to lookup values from the incoming + pod labels, those key-value labels are ANDed with + labelSelector to select the group of existing pods + over which spreading will be calculated for the incoming + pod. Keys that don't exist in the incoming pod labels + will be ignored. A null or empty list means only match + against labelSelector. + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: 'MaxSkew describes the degree to which + pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, + it is the maximum permitted difference between the + number of matching pods in the target topology and + the global minimum. The global minimum is the minimum + number of matching pods in an eligible domain or zero + if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to + 1, and pods with the same labelSelector spread as + 2/2/1: In this case, the global minimum is 1. 
| zone1 + | zone2 | zone3 | | P P | P P | P | - if MaxSkew + is 1, incoming pod can only be scheduled to zone3 + to become 2/2/2; scheduling it onto zone1(zone2) would + make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto + any zone. When `whenUnsatisfiable=ScheduleAnyway`, + it is used to give higher precedence to topologies + that satisfy it. It''s a required field. Default value + is 1 and 0 is not allowed.' + format: int32 + type: integer + minDomains: + description: "MinDomains indicates a minimum number + of eligible domains. When the number of eligible domains + with matching topology keys is less than minDomains, + Pod Topology Spread treats \"global minimum\" as 0, + and then the calculation of Skew is performed. And + when the number of eligible domains with matching + topology keys is equal to or greater than minDomains, this + value has no effect on scheduling. As a result, when + the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to + those domains. If value is nil, the constraint behaves + as if MinDomains is equal to 1. Valid values are integers + greater than 0. When value is not nil, WhenUnsatisfiable + must be DoNotSchedule. \n For example, in a 3-zone + cluster, MaxSkew is set to 2, MinDomains is set to + 5 and pods with the same labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | | P P | P P | P P | + The number of domains is less than 5(MinDomains), + so \"global minimum\" is treated as 0. In this situation, + new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod + is scheduled to any of the three zones, it will violate + MaxSkew. \n This is a beta field and requires the + MinDomainsInPodTopologySpread feature gate to be enabled + (enabled by default)." + format: int32 + type: integer + nodeAffinityPolicy: + description: "NodeAffinityPolicy indicates how we will + treat Pod's nodeAffinity/nodeSelector when calculating + pod topology spread skew. Options are: - Honor: only + nodes matching nodeAffinity/nodeSelector are included + in the calculations. - Ignore: nodeAffinity/nodeSelector + are ignored. All nodes are included in the calculations. + \n If this value is nil, the behavior is equivalent + to the Honor policy. This is an alpha-level feature + enabled by the NodeInclusionPolicyInPodTopologySpread + feature flag." + type: string + nodeTaintsPolicy: + description: "NodeTaintsPolicy indicates how we will + treat node taints when calculating pod topology spread + skew. Options are: - Honor: nodes without taints, + along with tainted nodes for which the incoming pod + has a toleration, are included. - Ignore: node taints + are ignored. All nodes are included. \n If this value + is nil, the behavior is equivalent to the Ignore policy. + This is an alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread + feature flag." + type: string + topologyKey: + description: TopologyKey is the key of node labels. + Nodes that have a label with this key and identical + values are considered to be in the same topology. + We consider each <key, value> as a "bucket", and try + to put balanced number of pods into each bucket. We + define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose + nodes meet the requirements of nodeAffinityPolicy + and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", + each Node is a domain of that topology.
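To ground the maxSkew and topologyKey semantics described above, here is a minimal sketch of a spread constraint; the app=worker label is an assumed example, and topology.kubernetes.io/zone is the conventional upstream zone key, neither taken from this patch:

```yaml
# Hypothetical constraint: spread pods labeled app=worker evenly across zones,
# allowing at most a difference of 1 matching pod between any two zones.
topologySpreadConstraints:
  - maxSkew: 1
    topologyKey: topology.kubernetes.io/zone
    whenUnsatisfiable: DoNotSchedule   # reject placements that would violate maxSkew
    labelSelector:
      matchLabels:
        app: worker                    # example label, not from this patch
```

With whenUnsatisfiable set to ScheduleAnyway instead, the same constraint becomes a soft preference rather than a hard scheduling filter.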
And, if TopologyKey + is "topology.kubernetes.io/zone", each zone is a domain + of that topology. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal + with a pod if it doesn''t satisfy the spread constraint. + - DoNotSchedule (default) tells the scheduler not + to schedule it. - ScheduleAnyway tells the scheduler + to schedule the pod in any location, but giving higher + precedence to topologies that would help reduce the + skew. A constraint is considered "Unsatisfiable" for + an incoming pod if and only if every possible node + assignment for that pod would violate "MaxSkew" on + some topology. For example, in a 3-zone cluster, MaxSkew + is set to 1, and pods with the same labelSelector + spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P + | P | P | If WhenUnsatisfiable is set to DoNotSchedule, + incoming pod can only be scheduled to zone2(zone3) + to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) + satisfies MaxSkew(1). In other words, the cluster + can still be imbalanced, but scheduler won''t make + it *more* imbalanced. It''s a required field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: 'List of volumes that can be mounted by containers + belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes' + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'awsElasticBlockStore represents an AWS + Disk resource that is attached to a kubelet''s host + machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'fsType is the filesystem type of the + volume that you want to mount. Tip: Ensure that + the filesystem type is supported by the host operating + system. Examples: "ext4", "xfs", "ntfs". Implicitly + inferred to be "ext4" if unspecified. More info: + https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem + from compromising the machine' + type: string + partition: + description: 'partition is the partition in the + volume that you want to mount. If omitted, the + default is to mount by volume name. Examples: + For volume /dev/sda1, you specify the partition + as "1". Similarly, the volume partition for /dev/sda + is "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'readOnly value true will force the + readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'volumeID is unique ID of the persistent + disk resource in AWS (Amazon EBS volume). More + info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk + mount on the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' 
+ type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + description: fsType is Filesystem type to mount. + Must be a filesystem type supported by the host + operating system. Ex. "ext4", "xfs", "ntfs". Implicitly + inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure + managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + description: readOnly Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting + in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service + mount on the host and bind mount to the pod. + properties: + readOnly: + description: readOnly defaults to false (read/write). + ReadOnly here will force the ReadOnly setting + in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the + host that shares a pod's lifetime + properties: + monitors: + description: 'monitors is Required: Monitors is + a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: 'readOnly is Optional: Defaults to + false (read/write). ReadOnly here will force the + ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'secretFile is Optional: SecretFile + is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'secretRef is Optional: SecretRef is + reference to the authentication secret for User, + default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: 'user is optional: User is the rados + user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'cinder represents a cinder volume attached + and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type to mount. + Must be a filesystem type supported by the host + operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. 
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'readOnly defaults to false (read/write). + ReadOnly here will force the ReadOnly setting + in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'secretRef is optional: points to a + secret object containing parameters used to connect + to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: 'volumeID used to identify the volume + in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: 'defaultMode is optional: mode bits + used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML accepts + both octal and decimal values, JSON requires decimal + values for mode bits. Defaults to 0644. Directories + within the path are not affected by this setting. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: items if unspecified, each key-value + pair in the Data field of the referenced ConfigMap + will be projected into the volume as a file whose + name is the key and content is the value. If specified, + the listed keys will be projected into the specified + paths, and unlisted keys will not be present. + If a key is specified which is not present in + the ConfigMap, the volume setup will error unless + it is marked optional. Paths must be relative + and may not contain the '..' path or start with + '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. Must + be an octal value between 0000 and 0777 + or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON + requires decimal values for mode bits. If + not specified, the volume defaultMode will + be used. This might be in conflict with + other options that affect the file mode, + like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of + the file to map the key to. May not be an + absolute path. May not contain the path + element '..'. May not start with the string + '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers (Beta feature). 
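The csi volume source just introduced lets a pod mount an ephemeral volume served by a CSI driver. A minimal sketch, assuming a hypothetical driver name and a driver-specific attribute:

```yaml
# Hypothetical inline CSI volume; the driver name and attributes are placeholders.
volumes:
  - name: scratch
    csi:
      driver: csi.example.com   # assumed driver name; consult your cluster admin
      fsType: ext4
      readOnly: false
      volumeAttributes:
        sizeGiB: "10"           # driver-specific; see your driver's documentation
```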
+ properties: + driver: + description: driver is the name of the CSI driver + that handles this volume. Consult with your admin + for the correct name as registered in the cluster. + type: string + fsType: + description: fsType to mount. Ex. "ext4", "xfs", + "ntfs". If not provided, the empty value is passed + to the associated CSI driver which will determine + the default filesystem to apply. + type: string + nodePublishSecretRef: + description: nodePublishSecretRef is a reference + to the secret object containing sensitive information + to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no + secret is required. If the secret object contains + more than one secret, all secret references are + passed. + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: readOnly specifies a read-only configuration + for the volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: volumeAttributes stores driver-specific + properties that are passed to the CSI driver. + Consult your driver's documentation for supported + values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about + the pod that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits used to set permissions + on created files by default. Must be an octal value + between 0000 and 0777 or + a decimal value between 0 and 511. YAML accepts + both octal and decimal values, JSON requires decimal + values for mode bits. Defaults to 0644. Directories + within the path are not affected by this setting. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name + and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: 'Optional: mode bits used to + set permissions on this file, must be an + octal value between 0000 and 0777 or a decimal + value between 0 and 511. YAML accepts both + octal and decimal values, JSON requires + decimal values for mode bits. If not specified, + the volume defaultMode will be used. This + might be in conflict with other options + that affect the file mode, like fsGroup, + and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded.
The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'emptyDir represents a temporary directory + that shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'medium represents what type of storage + medium should back this directory. The default + is "" which means to use the node''s default medium. + Must be an empty string (default) or Memory. More + info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'sizeLimit is the total amount of local + storage required for this EmptyDir volume. The + size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would + be the minimum value between the SizeLimit specified + here and the sum of memory limits of all containers + in a pod. The default is nil which means that + the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: "ephemeral represents a volume that is + handled by a cluster storage driver. The volume's + lifecycle is tied to the pod that defines it - it + will be created before the pod starts, and deleted + when the pod is removed. \n Use this if: a) the volume + is only needed while the pod runs, b) features of + normal volumes like restoring from snapshot or capacity + tracking are needed, c) the storage driver is specified + through a storage class, and d) the storage driver + supports dynamic volume provisioning through a PersistentVolumeClaim + (see EphemeralVolumeSource for more information on + the connection between this volume type and PersistentVolumeClaim). + \n Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the + lifecycle of an individual pod. \n Use CSI for light-weight + local ephemeral volumes if the CSI driver is meant + to be used that way - see the documentation of the + driver for more information. \n A pod can use both + types of ephemeral volumes and persistent volumes + at the same time." + properties: + volumeClaimTemplate: + description: "Will be used to create a stand-alone + PVC to provision the volume. The pod in which + this EphemeralVolumeSource is embedded will be + the owner of the PVC, i.e. the PVC will be deleted + together with the pod. 
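The volumeClaimTemplate machinery described here is easier to follow against a concrete manifest. A minimal sketch of a generic ephemeral volume, with an assumed StorageClass name that is not part of this patch:

```yaml
# Hypothetical generic ephemeral volume: the PVC is created when the pod is
# created and deleted with it; the pod owns the PVC.
volumes:
  - name: scratch
    ephemeral:
      volumeClaimTemplate:
        spec:
          accessModes: ["ReadWriteOnce"]
          storageClassName: fast-local   # assumed class, not from this patch
          resources:
            requests:
              storage: 1Gi
```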
The name of the PVC will + be `<pod name>-<volume name>` where `<volume name>` + is the name from the `PodSpec.Volumes` array entry. + Pod validation will reject the pod if the concatenated + name is not valid for a PVC (for example, too + long). \n An existing PVC with that name that + is not owned by the pod will *not* be used for + the pod to avoid using an unrelated volume by + mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created + PVC is meant to be used by the pod, the PVC has + to be updated with an owner reference to the pod + once the pod exists. Normally this should not + be necessary, but it may be useful when manually + reconstructing a broken cluster. \n This field + is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. \n Required, + must not be nil." + properties: + metadata: + description: May contain labels and annotations + that will be copied into the PVC when creating + it. No other fields are allowed and will be + rejected during validation. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + The entire content is copied unchanged into + the PVC that gets created from this template. + The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: 'accessModes contains the desired + access modes the volume should have. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used + to specify either: * An existing VolumeSnapshot + object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller + can support the specified data source, + it will create a new volume based on the + contents of the specified data source. + If the AnyVolumeDataSource feature gate + is enabled, this field will always have + the same contents as the DataSourceRef + field.' + properties: + apiGroup: + description: APIGroup is the group for + the resource being referenced. If + APIGroup is not specified, the specified + Kind must be in the core API group. + For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: 'dataSourceRef specifies the + object from which to populate the volume + with data, if a non-empty volume is desired. + This may be any local object from a non-empty + API group (non core object) or a PersistentVolumeClaim + object. When this field is specified, + volume binding will only succeed if the + type of the specified object matches some + installed volume populator or dynamic + provisioner. This field will replace the + functionality of the DataSource field + and as such if both fields are non-empty, + they must have the same value. For backwards + compatibility, both fields (DataSource + and DataSourceRef) will be set to the + same value automatically if one of them + is empty and the other is non-empty. There + are two important differences between + DataSource and DataSourceRef: * While + DataSource only allows two specific types + of objects, DataSourceRef allows any non-core + object, as well as PersistentVolumeClaim + objects.
* While DataSource ignores disallowed + values (dropping them), DataSourceRef + preserves all values, and generates an + error if a disallowed value is specified. + (Beta) Using this field requires the AnyVolumeDataSource + feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for + the resource being referenced. If + APIGroup is not specified, the specified + Kind must be in the core API group. + For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + resources: + description: 'resources represents the minimum + resources the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to + specify resource requirements that are + lower than previous value but must still + be higher than capacity recorded in the + status field of the claim. More info: + https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum + amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the + minimum amount of compute resources + required. If Requests is omitted for + a container, it defaults to Limits + if that is explicitly specified, otherwise + to an implementation-defined value. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of + {key,value} pairs. 
A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: 'storageClassName is the name + of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type + of volume is required by the claim. Value + of Filesystem is implied when not included + in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource + that is attached to a kubelet's host machine and then + exposed to the pod. + properties: + fsType: + description: 'fsType is the filesystem type to mount. + Must be a filesystem type supported by the host + operating system. Ex. "ext4", "xfs", "ntfs". Implicitly + inferred to be "ext4" if unspecified. TODO: how + do we prevent errors in the filesystem from compromising + the machine' + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'readOnly is Optional: Defaults to + false (read/write). ReadOnly here will force the + ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target + worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: 'wwids Optional: FC volume world wide + identifiers (wwids) Either wwids or combination + of targetWWNs and lun must be set, but not both + simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: flexVolume represents a generic volume + resource that is provisioned/attached using an exec + based plugin. + properties: + driver: + description: driver is the name of the driver to + use for this volume. + type: string + fsType: + description: fsType is the filesystem type to mount. + Must be a filesystem type supported by the host + operating system. Ex. "ext4", "xfs", "ntfs". The + default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds + extra command options if any.' + type: object + readOnly: + description: 'readOnly is Optional: defaults to + false (read/write). ReadOnly here will force the + ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'secretRef is Optional: secretRef is + reference to the secret object containing sensitive + information to pass to the plugin scripts. This + may be empty if no secret object is specified. + If the secret object contains more than one secret, + all secrets are passed to the plugin scripts.' + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached + to a kubelet's host machine. 
This depends on the Flocker + control service being running + properties: + datasetName: + description: datasetName is Name of the dataset + stored as metadata -> name on the dataset for + Flocker should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'gcePersistentDisk represents a GCE Disk + resource that is attached to a kubelet''s host machine + and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'fsType is filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred + to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem + from compromising the machine' + type: string + partition: + description: 'partition is the partition in the + volume that you want to mount. If omitted, the + default is to mount by volume name. Examples: + For volume /dev/sda1, you specify the partition + as "1". Similarly, the volume partition for /dev/sda + is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'pdName is unique name of the PD resource + in GCE. Used to identify the disk in GCE. More + info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly + setting in VolumeMounts. Defaults to false. More + info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'gitRepo represents a git repository at + a particular revision. DEPRECATED: GitRepo is deprecated. + To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo + using git, then mount the EmptyDir into the Pod''s + container.' + properties: + directory: + description: directory is the target directory name. + Must not contain or start with '..'. If '.' is + supplied, the volume directory will be the git + repository. Otherwise, if specified, the volume + will contain the git repository in the subdirectory + with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the + specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'glusterfs represents a Glusterfs mount + on the host that shares a pod''s lifetime. More info: + https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'endpoints is the endpoint name that + details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'readOnly here will force the Glusterfs + volume to be mounted with read-only permissions. + Defaults to false. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'hostPath represents a pre-existing file + or directory on the host machine that is directly + exposed to the container. This is generally used for + system agents or other privileged things that are + allowed to see the host machine. Most containers will + NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use + host directory mounts and who can/can not mount host + directories as read/write.' + properties: + path: + description: 'path of the directory on the host. + If the path is a symlink, it will follow the link + to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'type for HostPath Volume Defaults + to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'iscsi represents an ISCSI Disk resource + that is attached to a kubelet''s host machine and + then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support + iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support + iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'fsType is the filesystem type of the + volume that you want to mount. Tip: Ensure that + the filesystem type is supported by the host operating + system. Examples: "ext4", "xfs", "ntfs". Implicitly + inferred to be "ext4" if unspecified. More info: + https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem + from compromising the machine' + type: string + initiatorName: + description: initiatorName is the custom iSCSI Initiator + Name. If initiatorName is specified with iscsiInterface + simultaneously, new iSCSI interface <target portal>:<volume name> will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iscsiInterface is the interface Name + that uses an iSCSI transport. Defaults to 'default' + (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: portals is the iSCSI Target Portal + List. The portal is either an IP or ip_addr:port + if the port is other than default (typically TCP + ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: readOnly here will force the ReadOnly + setting in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI + target and initiator authentication + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: targetPortal is iSCSI Target Portal. + The Portal is either an IP or ip_addr:port if + the port is other than default (typically TCP + ports 860 and 3260).
+ type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'name of the volume. Must be a DNS_LABEL + and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'nfs represents an NFS mount on the host + that shares a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'readOnly here will force the NFS export + to be mounted with read-only permissions. Defaults + to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'server is the hostname or IP address + of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'persistentVolumeClaimVolumeSource represents + a reference to a PersistentVolumeClaim in the same + namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'claimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: readOnly Will force the ReadOnly setting + in VolumeMounts. Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host + machine + properties: + fsType: + description: fsType is the filesystem type to mount. + Must be a filesystem type supported by the host + operating system. Ex. "ext4", "xfs", "ntfs". Implicitly + inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume + attached and mounted on kubelets host machine + properties: + fsType: + description: fSType represents the filesystem type + to mount Must be a filesystem type supported by + the host operating system. Ex. "ext4", "xfs". + Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). + ReadOnly here will force the ReadOnly setting + in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: defaultMode are the mode bits used + to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML accepts + both octal and decimal values, JSON requires decimal + values for mode bits. Directories within the path + are not affected by this setting. This might be + in conflict with other options that affect the + file mode, like fsGroup, and the result can be + other mode bits set. 
+ format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected + along with other supported volume types + properties: + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: items if unspecified, each + key-value pair in the Data field of + the referenced ConfigMap will be projected + into the volume as a file whose name + is the key and content is the value. + If specified, the listed keys will be + projected into the specified paths, + and unlisted keys will not be present. + If a key is specified which is not present + in the ConfigMap, the volume setup will + error unless it is marked optional. + Paths must be relative and may not contain + the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: + mode bits used to set permissions + on this file. Must be an octal + value between 0000 and 0777 or + a decimal value between 0 and + 511. YAML accepts both octal and + decimal values, JSON requires + decimal values for mode bits. + If not specified, the volume defaultMode + will be used. This might be in + conflict with other options that + affect the file mode, like fsGroup, + and the result can be other mode + bits set.' + format: int32 + type: integer + path: + description: path is the relative + path of the file to map the key + to. May not be an absolute path. + May not contain the path element + '..'. May not start with the string + '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: 'Optional: mode bits + used to set permissions on this + file, must be an octal value between + 0000 and 0777 or a decimal value + between 0 and 511. YAML accepts + both octal and decimal values, + JSON requires decimal values for + mode bits. If not specified, the + volume defaultMode will be used. + This might be in conflict with + other options that affect the + file mode, like fsGroup, and the + result can be other mode bits + set.' + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. 
Must + be utf-8 encoded. The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource + of the container: only resources + limits and requests (limits.cpu, + limits.memory, requests.cpu and + requests.memory) are currently + supported.' + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: items if unspecified, each + key-value pair in the Data field of + the referenced Secret will be projected + into the volume as a file whose name + is the key and content is the value. + If specified, the listed keys will be + projected into the specified paths, + and unlisted keys will not be present. + If a key is specified which is not present + in the Secret, the volume setup will + error unless it is marked optional. + Paths must be relative and may not contain + the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: + mode bits used to set permissions + on this file. Must be an octal + value between 0000 and 0777 or + a decimal value between 0 and + 511. YAML accepts both octal and + decimal values, JSON requires + decimal values for mode bits. + If not specified, the volume defaultMode + will be used. This might be in + conflict with other options that + affect the file mode, like fsGroup, + and the result can be other mode + bits set.' + format: int32 + type: integer + path: + description: path is the relative + path of the file to map the key + to. May not be an absolute path. + May not contain the path element + '..'. May not start with the string + '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: audience is the intended + audience of the token. A recipient of + a token must identify itself with an + identifier specified in the audience + of the token, and otherwise should reject + the token. The audience defaults to + the identifier of the apiserver. + type: string + expirationSeconds: + description: expirationSeconds is the + requested duration of validity of the + service account token. 
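The serviceAccountToken projection described above pairs naturally with other projected sources in one volume. A minimal sketch, with a placeholder audience and ConfigMap name that are not part of this patch:

```yaml
# Hypothetical projected volume combining a bound service account token
# with a ConfigMap; audience and names are placeholders.
volumes:
  - name: bound-token
    projected:
      sources:
        - serviceAccountToken:
            audience: api.example.com   # assumed audience identifier
            expirationSeconds: 3600     # kubelet rotates well before expiry
            path: token
        - configMap:
            name: app-config            # example ConfigMap name
            optional: true
```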
As the token + approaches expiration, the kubelet volume + plugin will proactively rotate the service + account token. The kubelet will start + trying to rotate the token if the token + is older than 80 percent of its time + to live or if the token is older than + 24 hours. Defaults to 1 hour and must + be at least 10 minutes. + format: int64 + type: integer + path: + description: path is the path relative + to the mount point of the file to project + the token into. + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte mount on the + host that shares a pod's lifetime + properties: + group: + description: group to map volume access to. Default + is no group + type: string + readOnly: + description: readOnly here will force the Quobyte + volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: registry represents a single or multiple + Quobyte Registry services specified as a string + as host:port pair (multiple entries are separated + with commas) which acts as the central registry + for volumes + type: string + tenant: + description: tenant owning the given Quobyte volume + in the Backend. Used with dynamically provisioned + Quobyte volumes, value is set by the plugin + type: string + user: + description: user to map volume access to. Defaults + to serviceaccount user + type: string + volume: + description: volume is a string that references + an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'rbd represents a Rados Block Device mount + on the host that shares a pod''s lifetime. More info: + https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type of the + volume that you want to mount. Tip: Ensure that + the filesystem type is supported by the host operating + system. Examples: "ext4", "xfs", "ntfs". Implicitly + inferred to be "ext4" if unspecified. More info: + https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem + from compromising the machine' + type: string + image: + description: 'image is the rados image name. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'keyring is the path to key ring for + RBDUser. Default is /etc/ceph/keyring. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'pool is the rados pool name. Default + is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly + setting in VolumeMounts. Defaults to false. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'secretRef is name of the authentication + secret for RBDUser. If provided overrides keyring. + Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?'
+ type: string + type: object + x-kubernetes-map-type: atomic + user: + description: 'user is the rados user name. Default + is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent + volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. + Must be a filesystem type supported by the host + operating system. Ex. "ext4", "xfs", "ntfs". Default + is "xfs". + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: readOnly Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting + in VolumeMounts. + type: boolean + secretRef: + description: secretRef references to the secret + for ScaleIO user and other sensitive information. + If this is not provided, Login operation will + fail. + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + description: storageMode indicates whether the storage + for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: volumeName is the name of a volume + already created in the ScaleIO system that is + associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'secret represents a secret that should + populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits + used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML accepts + both octal and decimal values, JSON requires decimal + values for mode bits. Defaults to 0644. Directories + within the path are not affected by this setting. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: items If unspecified, each key-value + pair in the Data field of the referenced Secret + will be projected into the volume as a file whose + name is the key and content is the value. If specified, + the listed keys will be projected into the specified + paths, and unlisted keys will not be present. + If a key is specified which is not present in + the Secret, the volume setup will error unless + it is marked optional. Paths must be relative + and may not contain the '..' path or start with + '..'. + items: + description: Maps a string key to a path within + a volume. 
+ properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. Must + be an octal value between 0000 and 0777 + or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON + requires decimal values for mode bits. If + not specified, the volume defaultMode will + be used. This might be in conflict with + other options that affect the file mode, + like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of + the file to map the key to. May not be an + absolute path. May not contain the path + element '..'. May not start with the string + '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: 'secretName is the name of the secret + in the pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. + Must be a filesystem type supported by the host + operating system. Ex. "ext4", "xfs", "ntfs". Implicitly + inferred to be "ext4" if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). + ReadOnly here will force the ReadOnly setting + in VolumeMounts. + type: boolean + secretRef: + description: secretRef specifies the secret to use + for obtaining the StorageOS API credentials. If + not specified, default values will be attempted. + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: volumeName is the human-readable name + of the StorageOS volume. Volume names are only + unique within a namespace. + type: string + volumeNamespace: + description: volumeNamespace specifies the scope + of the volume within StorageOS. If no namespace + is specified then the Pod's namespace will be + used. This allows the Kubernetes name scoping + to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default + behaviour. Set to "default" if you are not using + namespaces within StorageOS. Namespaces that do + not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume + attached and mounted on kubelets host machine + properties: + fsType: + description: fsType is filesystem type to mount. + Must be a filesystem type supported by the host + operating system. Ex. "ext4", "xfs", "ntfs". Implicitly + inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. 
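
Several of the volume fields above are easier to read from a concrete manifest, particularly the rule that mode bits may be octal in YAML but must be decimal in JSON. Below is a minimal sketch of a pod volume using the secret schema described earlier in this CRD; the volume and secret names are illustrative, not taken from this repository.

```
# Hypothetical pod-spec fragment; names are placeholders.
volumes:
  - name: example-secret-vol
    secret:
      secretName: example-secret
      defaultMode: 0420       # octal in YAML; the same value in JSON is 272
      optional: false
      items:
        - key: tls.crt
          path: certs/tls.crt # relative path, no '..' elements
          mode: 0400          # octal; decimal 256 in JSON
```

Unlisted keys in the Secret are simply omitted from the volume, which is why the schema warns that naming a missing, non-optional key fails volume setup.
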
+ type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - containers + type: object + type: object + required: + - retryLimit + - template + type: object + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + type: object + served: true + storage: true diff --git a/vendor/github.com/NearNodeFlash/nnf-sos/config/crd/bases/nnf.cray.hpe.com_nnfnodes.yaml b/vendor/github.com/NearNodeFlash/nnf-sos/config/crd/bases/nnf.cray.hpe.com_nnfnodes.yaml index de3c8f14..4b399ea7 100644 --- a/vendor/github.com/NearNodeFlash/nnf-sos/config/crd/bases/nnf.cray.hpe.com_nnfnodes.yaml +++ b/vendor/github.com/NearNodeFlash/nnf-sos/config/crd/bases/nnf.cray.hpe.com_nnfnodes.yaml @@ -31,6 +31,11 @@ spec: - jsonPath: .metadata.creationTimestamp name: AGE type: date + - description: Parent pod name + jsonPath: .spec.pod + name: POD + priority: 1 + type: string name: v1alpha1 schema: openAPIV3Schema: @@ -121,6 +126,10 @@ spec: type: integer type: object type: array + fenced: + description: Fenced is true when the NNF Node is fenced by the STONITH + agent, and false otherwise. + type: boolean health: description: NnfResourceHealthType defines the health of an NNF resource. type: string diff --git a/vendor/github.com/NearNodeFlash/nnf-sos/config/crd/bases/nnf.cray.hpe.com_nnfnodestorages.yaml b/vendor/github.com/NearNodeFlash/nnf-sos/config/crd/bases/nnf.cray.hpe.com_nnfnodestorages.yaml index 17e18264..84600b32 100644 --- a/vendor/github.com/NearNodeFlash/nnf-sos/config/crd/bases/nnf.cray.hpe.com_nnfnodestorages.yaml +++ b/vendor/github.com/NearNodeFlash/nnf-sos/config/crd/bases/nnf.cray.hpe.com_nnfnodestorages.yaml @@ -80,6 +80,10 @@ spec: - gfs2 - lustre type: string + groupID: + description: Group ID for file system + format: int32 + type: integer lustreStorage: description: LustreStorageSpec describes the Lustre target created here, if FileSystemType specifies a Lustre target. @@ -116,10 +120,23 @@ spec: - OST type: string type: object + setOwnerGroup: + default: false + description: Set the owner and group permissions specified by UserID + and GroupID. This is for Lustre file systems only, and should be + set only after all Lustre targets are created. + type: boolean + userID: + description: User ID for file system + format: int32 + type: integer required: - clientEndpoints - count - fileSystemType + - groupID + - setOwnerGroup + - userID type: object status: description: NnfNodeStorageStatus defines the status for NnfNodeStorage @@ -369,6 +386,10 @@ spec: is populated on MGS nodes only. 
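
The nnfnodestorages.yaml hunks above make file-system ownership part of the NnfNodeStorage spec: userID and groupID become required fields, and setOwnerGroup asks the controller to apply them (Lustre only, and only after all Lustre targets are created). A minimal sketch of a spec using the new fields, assuming the nnf.cray.hpe.com/v1alpha1 group/version shown in these CRDs; the name, namespace, and ID values are hypothetical.

```
# Hypothetical NnfNodeStorage showing the new ownership fields.
apiVersion: nnf.cray.hpe.com/v1alpha1
kind: NnfNodeStorage
metadata:
  name: example-nnfnodestorage   # placeholder name
  namespace: example-namespace   # placeholder namespace
spec:
  count: 1
  fileSystemType: lustre
  clientEndpoints: []            # required by the schema; contents omitted here
  userID: 1050                   # new required field: file-system owner
  groupID: 1051                  # new required field: file-system group
  setOwnerGroup: true            # new: apply userID/groupID; Lustre only,
                                 # set after all Lustre targets exist
```

The matching status field, ownerGroupStatus, appears in the next hunk and reports the outcome of that ownership operation.
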
type: string
 type: object
+ ownerGroupStatus:
+ description: OwnerGroupStatus is the status of the operation for setting
+ the owner and group of a file system
+ type: string
 type: object
 type: object
 served: true
diff --git a/vendor/github.com/NearNodeFlash/nnf-sos/config/crd/bases/nnf.cray.hpe.com_nnfstorageprofiles.yaml b/vendor/github.com/NearNodeFlash/nnf-sos/config/crd/bases/nnf.cray.hpe.com_nnfstorageprofiles.yaml
index d4bef227..18832353 100644
--- a/vendor/github.com/NearNodeFlash/nnf-sos/config/crd/bases/nnf.cray.hpe.com_nnfstorageprofiles.yaml
+++ b/vendor/github.com/NearNodeFlash/nnf-sos/config/crd/bases/nnf.cray.hpe.com_nnfstorageprofiles.yaml
@@ -44,10 +44,70 @@ spec:
 gfs2Storage:
 description: GFS2Storage defines the GFS2-specific configuration
 properties:
- placeholder:
- default: true
- description: Placeholder
- type: boolean
+ commandlines:
+ description: CmdLines contains commands to create volumes and
+ filesystems.
+ properties:
+ lvCreate:
+ description: LvCreate specifies the lvcreate commandline,
+ minus the "lvcreate".
+ type: string
+ lvRemove:
+ description: LvRemove specifies the lvremove commandline,
+ minus the "lvremove".
+ type: string
+ mkfs:
+ description: Mkfs specifies the mkfs commandline, minus the
+ "mkfs".
+ type: string
+ pvCreate:
+ description: PvCreate specifies the pvcreate commandline,
+ minus the "pvcreate".
+ type: string
+ vgChange:
+ description: VgChange specifies the various vgchange commandlines,
+ minus the "vgchange"
+ properties:
+ activate:
+ description: The vgchange commandline for activation,
+ minus the "vgchange" command
+ type: string
+ deactivate:
+ description: The vgchange commandline for deactivation,
+ minus the "vgchange" command
+ type: string
+ lockStart:
+ description: The vgchange commandline for lockStart, minus
+ the "vgchange" command
+ type: string
+ type: object
+ vgCreate:
+ description: VgCreate specifies the vgcreate commandline,
+ minus the "vgcreate".
+ type: string
+ vgRemove:
+ description: VgRemove specifies the vgremove commandline,
+ minus the "vgremove".
+ type: string
+ type: object
+ options:
+ description: Options contains options for libraries.
+ properties:
+ mountCompute:
+ description: MountCompute specifies mount options for mounting
+ on the Compute. Use one array element per option. Do not
+ prepend the options with "-o".
+ items:
+ type: string
+ type: array
+ mountRabbit:
+ description: MountRabbit specifies mount options for mounting
+ on the Rabbit. Use one array element per option. Do not
+ prepend the options with "-o".
+ items:
+ type: string
+ type: array
+ type: object
 type: object
 lustreStorage:
 description: LustreStorage defines the Lustre-specific configuration
@@ -68,10 +128,144 @@ spec:
 combinedMGTMDT:
 description: CombinedMGTMDT indicates whether the MGT and MDT should
 be created on the same target device
 type: boolean
+ exclusiveMdt:
+ default: false
+ description: ExclusiveMDT indicates that the MDT should not be
+ colocated with any other target on the chosen server.
+ type: boolean
 externalMgs:
 description: ExternalMGS contains the NIDs of a pre-existing MGS
 that should be used
 type: string
+ mdtCommandlines:
+ description: MdtCmdLines contains commands to create an MDT target.
+ properties:
+ mkfs:
+ description: Mkfs specifies the mkfs.lustre commandline, minus
+ the "mkfs.lustre". Use the --mkfsoptions argument to specify
+ the zfs create options. See zfsprops(7). Use the --mountfsoptions
+ argument to specify persistent mount options for the lustre
+ targets.
+ type: string + zpoolCreate: + description: ZpoolCreate specifies the zpool create commandline, + minus the "zpool create". This is where you may specify + zpool create options, and the virtual device (vdev) such + as "mirror", or "draid". See zpoolconcepts(7). + type: string + type: object + mdtOptions: + description: MdtOptions contains options to use for libraries + used for an MDT target. + properties: + mountTarget: + description: MountTarget specifies mount options for the mount-utils + library to mount a lustre target on the Rabbit. For persistent + mount options for lustre targets, do not use this array; + use the --mountfsoptions argument to mkfs.lustre instead. + Use one array element per option. Do not prepend the options + with "-o". + items: + type: string + type: array + type: object + mgtCommandlines: + description: MgtCmdLines contains commands to create an MGT target. + properties: + mkfs: + description: Mkfs specifies the mkfs.lustre commandline, minus + the "mkfs.lustre". Use the --mkfsoptions argument to specify + the zfs create options. See zfsprops(7). Use the --mountfsoptions + argument to specify persistent mount options for the lustre + targets. + type: string + zpoolCreate: + description: ZpoolCreate specifies the zpool create commandline, + minus the "zpool create". This is where you may specify + zpool create options, and the virtual device (vdev) such + as "mirror", or "draid". See zpoolconcepts(7). + type: string + type: object + mgtMdtCommandlines: + description: MgtMdtCmdLines contains commands to create a combined + MGT/MDT target. + properties: + mkfs: + description: Mkfs specifies the mkfs.lustre commandline, minus + the "mkfs.lustre". Use the --mkfsoptions argument to specify + the zfs create options. See zfsprops(7). Use the --mountfsoptions + argument to specify persistent mount options for the lustre + targets. + type: string + zpoolCreate: + description: ZpoolCreate specifies the zpool create commandline, + minus the "zpool create". This is where you may specify + zpool create options, and the virtual device (vdev) such + as "mirror", or "draid". See zpoolconcepts(7). + type: string + type: object + mgtMdtOptions: + description: MgtMdtOptions contains options to use for libraries + used for a combined MGT/MDT target. + properties: + mountTarget: + description: MountTarget specifies mount options for the mount-utils + library to mount a lustre target on the Rabbit. For persistent + mount options for lustre targets, do not use this array; + use the --mountfsoptions argument to mkfs.lustre instead. + Use one array element per option. Do not prepend the options + with "-o". + items: + type: string + type: array + type: object + mgtOptions: + description: MgtOptions contains options to use for libraries + used for an MGT target. + properties: + mountTarget: + description: MountTarget specifies mount options for the mount-utils + library to mount a lustre target on the Rabbit. For persistent + mount options for lustre targets, do not use this array; + use the --mountfsoptions argument to mkfs.lustre instead. + Use one array element per option. Do not prepend the options + with "-o". + items: + type: string + type: array + type: object + ostCommandlines: + description: OstCmdLines contains commands to create an OST target. + properties: + mkfs: + description: Mkfs specifies the mkfs.lustre commandline, minus + the "mkfs.lustre". Use the --mkfsoptions argument to specify + the zfs create options. See zfsprops(7). 
Use the --mountfsoptions
+ argument to specify persistent mount options for the lustre
+ targets.
+ type: string
+ zpoolCreate:
+ description: ZpoolCreate specifies the zpool create commandline,
+ minus the "zpool create". This is where you may specify
+ zpool create options, and the virtual device (vdev) such
+ as "mirror", or "draid". See zpoolconcepts(7).
+ type: string
+ type: object
+ ostOptions:
+ description: OstOptions contains options to use for libraries
+ used for an OST target.
+ properties:
+ mountTarget:
+ description: MountTarget specifies mount options for the mount-utils
+ library to mount a lustre target on the Rabbit. For persistent
+ mount options for lustre targets, do not use this array;
+ use the --mountfsoptions argument to mkfs.lustre instead.
+ Use one array element per option. Do not prepend the options
+ with "-o".
+ items:
+ type: string
+ type: array
+ type: object
 type: object
 pinned:
 default: false
@@ -80,18 +274,120 @@ spec:
 rawStorage:
 description: RawStorage defines the Raw-specific configuration
 properties:
- placeholder:
- default: true
- description: Placeholder
- type: boolean
+ commandlines:
+ description: CmdLines contains commands to create volumes and
+ filesystems.
+ properties:
+ lvCreate:
+ description: LvCreate specifies the lvcreate commandline,
+ minus the "lvcreate".
+ type: string
+ lvRemove:
+ description: LvRemove specifies the lvremove commandline,
+ minus the "lvremove".
+ type: string
+ mkfs:
+ description: Mkfs specifies the mkfs commandline, minus the
+ "mkfs".
+ type: string
+ pvCreate:
+ description: PvCreate specifies the pvcreate commandline,
+ minus the "pvcreate".
+ type: string
+ vgChange:
+ description: VgChange specifies the various vgchange commandlines,
+ minus the "vgchange"
+ properties:
+ activate:
+ description: The vgchange commandline for activation,
+ minus the "vgchange" command
+ type: string
+ deactivate:
+ description: The vgchange commandline for deactivation,
+ minus the "vgchange" command
+ type: string
+ lockStart:
+ description: The vgchange commandline for lockStart, minus
+ the "vgchange" command
+ type: string
+ type: object
+ vgCreate:
+ description: VgCreate specifies the vgcreate commandline,
+ minus the "vgcreate".
+ type: string
+ vgRemove:
+ description: VgRemove specifies the vgremove commandline,
+ minus the "vgremove".
+ type: string
+ type: object
 type: object
 xfsStorage:
 description: XFSStorage defines the XFS-specific configuration
 properties:
- placeholder:
- default: true
- description: Placeholder
- type: boolean
+ commandlines:
+ description: CmdLines contains commands to create volumes and
+ filesystems.
+ properties:
+ lvCreate:
+ description: LvCreate specifies the lvcreate commandline,
+ minus the "lvcreate".
+ type: string
+ lvRemove:
+ description: LvRemove specifies the lvremove commandline,
+ minus the "lvremove".
+ type: string
+ mkfs:
+ description: Mkfs specifies the mkfs commandline, minus the
+ "mkfs".
+ type: string
+ pvCreate:
+ description: PvCreate specifies the pvcreate commandline,
+ minus the "pvcreate".
+ type: string
+ vgChange:
+ description: VgChange specifies the various vgchange commandlines,
+ minus the "vgchange"
+ properties:
+ activate:
+ description: The vgchange commandline for activation,
+ minus the "vgchange" command
+ type: string
+ deactivate:
+ description: The vgchange commandline for deactivation,
+ minus the "vgchange" command
+ type: string
+ lockStart:
+ description: The vgchange commandline for lockStart, minus
+ the "vgchange" command
+ type: string
+ type: object
+ vgCreate:
+ description: VgCreate specifies the vgcreate commandline,
+ minus the "vgcreate".
+ type: string
+ vgRemove:
+ description: VgRemove specifies the vgremove commandline,
+ minus the "vgremove".
+ type: string
+ type: object
+ options:
+ description: Options contains options for libraries.
+ properties:
+ mountCompute:
+ description: MountCompute specifies mount options for mounting
+ on the Compute. Use one array element per option. Do not
+ prepend the options with "-o".
+ items:
+ type: string
+ type: array
+ mountRabbit:
+ description: MountRabbit specifies mount options for mounting
+ on the Rabbit. Use one array element per option. Do not
+ prepend the options with "-o".
+ items:
+ type: string
+ type: array
+ type: object
 type: object
 required:
 - gfs2Storage
diff --git a/vendor/github.com/NearNodeFlash/nnf-sos/config/crd/bases/nnf.cray.hpe.com_nnfstorages.yaml b/vendor/github.com/NearNodeFlash/nnf-sos/config/crd/bases/nnf.cray.hpe.com_nnfstorages.yaml
index d3e50b37..1068246c 100644
--- a/vendor/github.com/NearNodeFlash/nnf-sos/config/crd/bases/nnf.cray.hpe.com_nnfstorages.yaml
+++ b/vendor/github.com/NearNodeFlash/nnf-sos/config/crd/bases/nnf.cray.hpe.com_nnfstorages.yaml
@@ -117,9 +117,19 @@ spec:
 - gfs2
 - lustre
 type: string
+ groupID:
+ description: Group ID for file system
+ format: int32
+ type: integer
+ userID:
+ description: User ID for file system
+ format: int32
+ type: integer
 required:
 - allocationSets
 - fileSystemType
+ - groupID
+ - userID
 type: object
 status:
 description: NnfStorageStatus defines the observed status of NNF Storage.
@@ -139,10 +149,10 @@ spec:
 description: Error is the human readable error string
 type: string
 health:
- description: Health reflects the health of this NNF Storage
+ description: Health reflects the health of this allocation set
 type: string
 status:
- description: Status reflects the status of this NNF Storage
+ description: Status reflects the status of this allocation set
 type: string
 required:
 - allocationCount
@@ -169,6 +179,9 @@ spec:
 mgsNode:
 description: MgsNode is the NID of the MGS.
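
The nnfstorageprofiles.yaml changes a few hunks back replace the gfs2Storage, rawStorage, and xfsStorage placeholders with real commandlines and options sections, and give each Lustre target type its own mkfs/zpoolCreate commandlines plus mount-utils options. A minimal sketch of the kind of content such a profile might carry; every command string and $VARIABLE below is an illustrative placeholder, not a verified or recommended setting, and the fragment is shown without its surrounding NnfStorageProfile wrapper.

```
# Hypothetical NnfStorageProfile fragment; all values are placeholders.
gfs2Storage:
  commandlines:
    pvCreate: $DEVICE                   # appended after "pvcreate"
    vgCreate: $VG_NAME $DEVICE_LIST     # appended after "vgcreate"
    lvCreate: --extents 100%VG --name $LV_NAME $VG_NAME
    mkfs: -j2 -p lock_dlm -t $CLUSTER_NAME:$LOCK_SPACE $DEVICE
    vgChange:
      lockStart: --lock-start $VG_NAME
      activate: --activate ys $VG_NAME
      deactivate: --activate n $VG_NAME
    lvRemove: $VG_NAME
    vgRemove: $VG_NAME
  options:
    mountRabbit: ["noatime"]            # one option per element, no "-o"
    mountCompute: ["noatime"]
lustreStorage:
  mdtCommandlines:
    zpoolCreate: -O canmount=off -o cachefile=none $POOL_NAME $VDEV_LIST
    mkfs: --mdt --fsname=$FS_NAME --mgsnode=$MGS_NID --index=$INDEX $VOL_NAME
  mdtOptions:
    mountTarget: []                     # persistent options belong in --mountfsoptions
```

Moving the LVM and mkfs command lines out of the controller and into the profile lets sites tune file-system creation without rebuilding images; as the descriptions state, each string omits the leading command name.
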
type: string + status: + description: Status reflects the status of this NNF Storage + type: string type: object type: object served: true diff --git a/vendor/modules.txt b/vendor/modules.txt index ff2bfa1d..fd57da46 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -20,31 +20,41 @@ github.com/Azure/go-autorest/logger # github.com/Azure/go-autorest/tracing v0.6.0 ## explicit; go 1.12 github.com/Azure/go-autorest/tracing -# github.com/HewlettPackard/dws v0.0.0-20221027172242-11a787dffb4c +# github.com/HewlettPackard/dws v0.0.0-20230207214424-016335e15075 ## explicit; go 1.19 github.com/HewlettPackard/dws/api/v1alpha1 github.com/HewlettPackard/dws/utils/dwdparse github.com/HewlettPackard/dws/utils/updater +# github.com/HewlettPackard/structex v1.0.2 +## explicit; go 1.14 # github.com/NearNodeFlash/lustre-fs-operator v0.0.0-20221101145649-697b21833988 ## explicit; go 1.19 github.com/NearNodeFlash/lustre-fs-operator/api/v1alpha1 github.com/NearNodeFlash/lustre-fs-operator/config/crd/bases -# github.com/NearNodeFlash/nnf-ec v0.0.0-20220920142937-1080197e523b +# github.com/NearNodeFlash/nnf-ec v0.0.0-20230221180225-1dce5b1f270f ## explicit; go 1.16 github.com/NearNodeFlash/nnf-ec/pkg/rfsf/pkg/models -# github.com/NearNodeFlash/nnf-sos v0.0.0-20221012202709-192706b48299 +# github.com/NearNodeFlash/nnf-sos v0.0.0-20230222155613-59e3bc22638a ## explicit; go 1.19 github.com/NearNodeFlash/nnf-sos/api/v1alpha1 github.com/NearNodeFlash/nnf-sos/config/crd/bases # github.com/beorn7/perks v1.0.1 ## explicit; go 1.11 github.com/beorn7/perks/quantile +# github.com/cespare/xxhash v1.1.0 +## explicit # github.com/cespare/xxhash/v2 v2.1.2 ## explicit; go 1.11 github.com/cespare/xxhash/v2 # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew +# github.com/dgraph-io/badger/v3 v3.2103.1 +## explicit; go 1.12 +# github.com/dgraph-io/ristretto v0.1.0 +## explicit; go 1.12 +# github.com/dustin/go-humanize v1.0.0 +## explicit # github.com/emicklei/go-restful/v3 v3.9.0 ## explicit; go 1.13 github.com/emicklei/go-restful/v3 @@ -79,6 +89,8 @@ github.com/gogo/protobuf/sortkeys # github.com/golang-jwt/jwt/v4 v4.4.2 ## explicit; go 1.16 github.com/golang-jwt/jwt/v4 +# github.com/golang/glog v1.0.0 +## explicit; go 1.11 # github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da ## explicit github.com/golang/groupcache/lru @@ -90,6 +102,10 @@ github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp +# github.com/golang/snappy v0.0.4 +## explicit +# github.com/google/flatbuffers v2.0.0+incompatible +## explicit # github.com/google/gnostic v0.6.9 ## explicit; go 1.12 github.com/google/gnostic/compiler @@ -111,6 +127,8 @@ github.com/google/gofuzz/bytesource # github.com/google/uuid v1.3.0 ## explicit github.com/google/uuid +# github.com/gorilla/mux v1.8.0 +## explicit; go 1.12 # github.com/imdario/mergo v0.3.13 ## explicit; go 1.13 github.com/imdario/mergo @@ -120,14 +138,20 @@ github.com/josharian/intern # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go +# github.com/klauspost/compress v1.13.5 +## explicit; go 1.15 # github.com/mailru/easyjson v0.7.7 ## explicit; go 1.12 github.com/mailru/easyjson/buffer github.com/mailru/easyjson/jlexer github.com/mailru/easyjson/jwriter +# github.com/mattn/go-isatty v0.0.14 +## explicit; go 1.12 # github.com/matttproud/golang_protobuf_extensions v1.0.2 ## explicit; go 1.9 
github.com/matttproud/golang_protobuf_extensions/pbutil +# github.com/moby/sys/mountinfo v0.6.2 +## explicit; go 1.16 # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd ## explicit github.com/modern-go/concurrent @@ -166,6 +190,8 @@ github.com/onsi/gomega/types # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors +# github.com/pkg/term v1.1.0 +## explicit; go 1.14 # github.com/prometheus/client_golang v1.13.0 ## explicit; go 1.17 github.com/prometheus/client_golang/prometheus @@ -185,12 +211,26 @@ github.com/prometheus/common/model github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util +# github.com/rs/cors v1.8.0 +## explicit; go 1.13 +# github.com/senseyeio/duration v0.0.0-20180430131211-7c2a214ada46 +## explicit +# github.com/sigurn/crc8 v0.0.0-20160107002456-e55481d6f45c +## explicit +# github.com/sigurn/utils v0.0.0-20190728110027-e1fefb11a144 +## explicit +# github.com/sirupsen/logrus v1.9.0 +## explicit; go 1.13 # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag # github.com/takama/daemon v1.0.0 ## explicit; go 1.14 github.com/takama/daemon +# go.chromium.org/luci v0.0.0-20210915061045-7722a2154c29 +## explicit; go 1.16 +# go.opencensus.io v0.23.0 +## explicit; go 1.13 # go.uber.org/atomic v1.10.0 ## explicit; go 1.18 go.uber.org/atomic @@ -659,6 +699,8 @@ k8s.io/kube-openapi/pkg/schemamutation k8s.io/kube-openapi/pkg/spec3 k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/validation/spec +# k8s.io/mount-utils v0.25.2 +## explicit; go 1.19 # k8s.io/utils v0.0.0-20220922133306-665eaaec4324 ## explicit; go 1.18 k8s.io/utils/buffer From d4583da3ee267ef38df8896661e5a17adb48b883 Mon Sep 17 00:00:00 2001 From: Blake Devcich Date: Mon, 6 Mar 2023 14:10:06 -0600 Subject: [PATCH 06/24] Update nnf-sos vendor Signed-off-by: Blake Devcich --- go.mod | 80 +- go.sum | 626 +------ .../go/compute/internal/version.go | 18 + .../go/compute/metadata/CHANGES.md | 19 + .../go/compute/metadata/LICENSE | 202 ++ .../go/compute/metadata/README.md | 27 + .../go/compute/metadata/metadata.go | 3 +- .../go/compute/metadata/tidyfix.go | 23 + ...types.go => system_configuration_types.go} | 7 + .../dws/api/v1alpha1/zz_generated.deepcopy.go | 6 + .../api/v1alpha1/nnf_port_manager_types.go | 136 ++ .../api/v1alpha1/zz_generated.deepcopy.go | 144 ++ .../nnf.cray.hpe.com_nnfportmanagers.yaml | 222 +++ vendor/github.com/cespare/xxhash/v2/README.md | 31 +- .../github.com/cespare/xxhash/v2/testall.sh | 10 + vendor/github.com/cespare/xxhash/v2/xxhash.go | 47 +- .../cespare/xxhash/v2/xxhash_amd64.s | 336 ++-- .../cespare/xxhash/v2/xxhash_arm64.s | 183 ++ .../v2/{xxhash_amd64.go => xxhash_asm.go} | 2 + .../cespare/xxhash/v2/xxhash_other.go | 22 +- .../cespare/xxhash/v2/xxhash_safe.go | 1 + .../cespare/xxhash/v2/xxhash_unsafe.go | 3 +- .../go-task/slim-sprig/.editorconfig | 14 + .../go-task/slim-sprig/.gitattributes | 1 + .../github.com/go-task/slim-sprig/.gitignore | 2 + .../go-task/slim-sprig/CHANGELOG.md | 364 ++++ .../github.com/go-task/slim-sprig/LICENSE.txt | 19 + .../github.com/go-task/slim-sprig/README.md | 73 + .../go-task/slim-sprig/Taskfile.yml | 12 + .../github.com/go-task/slim-sprig/crypto.go | 24 + vendor/github.com/go-task/slim-sprig/date.go | 152 ++ .../github.com/go-task/slim-sprig/defaults.go | 163 ++ vendor/github.com/go-task/slim-sprig/dict.go | 118 ++ vendor/github.com/go-task/slim-sprig/doc.go | 19 + .../go-task/slim-sprig/functions.go | 317 ++++ 
vendor/github.com/go-task/slim-sprig/list.go | 464 +++++ .../github.com/go-task/slim-sprig/network.go | 12 + .../github.com/go-task/slim-sprig/numeric.go | 228 +++ .../github.com/go-task/slim-sprig/reflect.go | 28 + vendor/github.com/go-task/slim-sprig/regex.go | 83 + .../github.com/go-task/slim-sprig/strings.go | 189 ++ vendor/github.com/go-task/slim-sprig/url.go | 66 + vendor/github.com/google/pprof/AUTHORS | 7 + vendor/github.com/google/pprof/CONTRIBUTORS | 16 + vendor/github.com/google/pprof/LICENSE | 202 ++ .../github.com/google/pprof/profile/encode.go | 588 ++++++ .../github.com/google/pprof/profile/filter.go | 274 +++ .../github.com/google/pprof/profile/index.go | 64 + .../pprof/profile/legacy_java_profile.go | 315 ++++ .../google/pprof/profile/legacy_profile.go | 1228 ++++++++++++ .../github.com/google/pprof/profile/merge.go | 568 ++++++ .../google/pprof/profile/profile.go | 813 ++++++++ .../github.com/google/pprof/profile/proto.go | 367 ++++ .../github.com/google/pprof/profile/prune.go | 194 ++ vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md | 190 +- vendor/github.com/onsi/ginkgo/v2/README.md | 56 +- vendor/github.com/onsi/ginkgo/v2/core_dsl.go | 67 +- .../onsi/ginkgo/v2/decorator_dsl.go | 14 +- .../onsi/ginkgo/v2/formatter/formatter.go | 61 +- .../ginkgo/v2/ginkgo/build/build_command.go | 63 + .../onsi/ginkgo/v2/ginkgo/command/abort.go | 61 + .../onsi/ginkgo/v2/ginkgo/command/command.go | 50 + .../onsi/ginkgo/v2/ginkgo/command/program.go | 182 ++ .../ginkgo/generators/boostrap_templates.go | 48 + .../v2/ginkgo/generators/bootstrap_command.go | 133 ++ .../v2/ginkgo/generators/generate_command.go | 259 +++ .../ginkgo/generators/generate_templates.go | 41 + .../v2/ginkgo/generators/generators_common.go | 64 + .../onsi/ginkgo/v2/ginkgo/internal/compile.go | 152 ++ .../ginkgo/internal/profiles_and_reports.go | 237 +++ .../onsi/ginkgo/v2/ginkgo/internal/run.go | 355 ++++ .../ginkgo/v2/ginkgo/internal/test_suite.go | 283 +++ .../onsi/ginkgo/v2/ginkgo/internal/utils.go | 86 + .../v2/ginkgo/internal/verify_version.go | 54 + .../ginkgo/v2/ginkgo/labels/labels_command.go | 123 ++ .../github.com/onsi/ginkgo/v2/ginkgo/main.go | 58 + .../onsi/ginkgo/v2/ginkgo/outline/ginkgo.go | 302 +++ .../onsi/ginkgo/v2/ginkgo/outline/import.go | 65 + .../onsi/ginkgo/v2/ginkgo/outline/outline.go | 110 ++ .../v2/ginkgo/outline/outline_command.go | 98 + .../onsi/ginkgo/v2/ginkgo/run/run_command.go | 232 +++ .../v2/ginkgo/unfocus/unfocus_command.go | 186 ++ .../onsi/ginkgo/v2/ginkgo/watch/delta.go | 22 + .../ginkgo/v2/ginkgo/watch/delta_tracker.go | 75 + .../ginkgo/v2/ginkgo/watch/dependencies.go | 92 + .../ginkgo/v2/ginkgo/watch/package_hash.go | 108 ++ .../ginkgo/v2/ginkgo/watch/package_hashes.go | 85 + .../onsi/ginkgo/v2/ginkgo/watch/suite.go | 87 + .../ginkgo/v2/ginkgo/watch/watch_command.go | 192 ++ .../onsi/ginkgo/v2/ginkgo_cli_dependencies.go | 8 + .../github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go | 2 +- .../onsi/ginkgo/v2/internal/group.go | 50 +- .../onsi/ginkgo/v2/internal/node.go | 64 +- .../onsi/ginkgo/v2/internal/ordering.go | 77 +- .../parallel_support/client_server.go | 2 + .../internal/parallel_support/http_client.go | 13 + .../internal/parallel_support/http_server.go | 29 +- .../internal/parallel_support/rpc_client.go | 13 + .../parallel_support/server_handler.go | 55 +- .../ginkgo/v2/internal/progress_report.go | 26 +- .../onsi/ginkgo/v2/internal/report_entry.go | 21 +- .../onsi/ginkgo/v2/internal/suite.go | 282 ++- .../internal/testingtproxy/testing_t_proxy.go | 2 +- 
.../onsi/ginkgo/v2/internal/writer.go | 42 +- .../ginkgo/v2/reporters/default_reporter.go | 617 +++--- .../v2/reporters/deprecated_reporter.go | 2 +- .../onsi/ginkgo/v2/reporters/junit_report.go | 109 +- .../onsi/ginkgo/v2/reporters/reporter.go | 18 +- .../onsi/ginkgo/v2/reporting_dsl.go | 28 +- vendor/github.com/onsi/ginkgo/v2/table_dsl.go | 33 +- .../onsi/ginkgo/v2/types/code_location.go | 78 +- .../github.com/onsi/ginkgo/v2/types/config.go | 34 +- .../ginkgo/v2/types/deprecation_support.go | 7 + .../github.com/onsi/ginkgo/v2/types/errors.go | 19 +- .../onsi/ginkgo/v2/types/label_filter.go | 11 + .../onsi/ginkgo/v2/types/report_entry.go | 14 +- .../github.com/onsi/ginkgo/v2/types/types.go | 310 ++- .../onsi/ginkgo/v2/types/version.go | 2 +- vendor/github.com/onsi/gomega/.gitignore | 3 +- vendor/github.com/onsi/gomega/CHANGELOG.md | 80 + vendor/github.com/onsi/gomega/RELEASING.md | 2 +- vendor/github.com/onsi/gomega/gomega_dsl.go | 28 +- .../onsi/gomega/internal/async_assertion.go | 212 ++- .../github.com/onsi/gomega/internal/gomega.go | 39 +- vendor/github.com/onsi/gomega/matchers.go | 17 +- .../gomega/matchers/have_exact_elements.go | 75 + .../gomega/matchers/match_error_matcher.go | 12 +- .../onsi/gomega/matchers/succeed_matcher.go | 9 + vendor/github.com/onsi/gomega/types/types.go | 9 +- vendor/go.uber.org/multierr/CHANGELOG.md | 8 + vendor/go.uber.org/multierr/error.go | 345 ++-- vendor/go.uber.org/zap/CHANGELOG.md | 13 + vendor/go.uber.org/zap/array_go118.go | 8 +- vendor/go.uber.org/zap/logger.go | 7 + vendor/go.uber.org/zap/options.go | 3 +- vendor/go.uber.org/zap/sink.go | 100 +- vendor/go.uber.org/zap/stacktrace.go | 2 +- vendor/go.uber.org/zap/sugar.go | 31 +- vendor/go.uber.org/zap/writer.go | 2 +- vendor/go.uber.org/zap/zapcore/entry.go | 3 +- vendor/golang.org/x/crypto/pkcs12/crypto.go | 2 +- .../x/crypto/pkcs12/internal/rc2/rc2.go | 53 +- vendor/golang.org/x/crypto/ssh/certs.go | 2 +- vendor/golang.org/x/crypto/ssh/cipher.go | 8 +- vendor/golang.org/x/crypto/ssh/common.go | 19 +- vendor/golang.org/x/crypto/ssh/connection.go | 2 +- vendor/golang.org/x/crypto/ssh/handshake.go | 69 +- vendor/golang.org/x/crypto/ssh/keys.go | 2 +- vendor/golang.org/x/crypto/ssh/messages.go | 2 +- vendor/golang.org/x/crypto/ssh/server.go | 27 +- vendor/golang.org/x/net/html/parse.go | 2 +- vendor/golang.org/x/net/html/token.go | 57 +- vendor/golang.org/x/net/http2/flow.go | 88 +- vendor/golang.org/x/net/http2/frame.go | 11 +- vendor/golang.org/x/net/http2/headermap.go | 18 + vendor/golang.org/x/net/http2/hpack/encode.go | 5 + vendor/golang.org/x/net/http2/hpack/hpack.go | 81 +- .../x/net/http2/hpack/static_table.go | 188 ++ vendor/golang.org/x/net/http2/hpack/tables.go | 78 +- vendor/golang.org/x/net/http2/server.go | 335 ++-- vendor/golang.org/x/net/http2/transport.go | 201 +- vendor/golang.org/x/net/trace/histogram.go | 2 +- vendor/golang.org/x/oauth2/google/doc.go | 12 +- .../google/internal/externalaccount/aws.go | 105 +- .../externalaccount/basecredentials.go | 6 + vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c | 1 + .../golang.org/x/sys/cpu/cpu_linux_arm64.go | 44 +- vendor/golang.org/x/sys/cpu/endian_big.go | 11 + vendor/golang.org/x/sys/cpu/endian_little.go | 11 + vendor/golang.org/x/sys/cpu/parse.go | 43 + .../x/sys/cpu/proc_cpuinfo_linux.go | 54 + vendor/golang.org/x/sys/unix/gccgo.go | 4 +- vendor/golang.org/x/sys/unix/gccgo_c.c | 4 +- vendor/golang.org/x/sys/unix/ioctl.go | 4 +- vendor/golang.org/x/sys/unix/mkall.sh | 4 +- vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 14 + 
.../golang.org/x/sys/unix/syscall_darwin.go | 1 + .../x/sys/unix/syscall_dragonfly.go | 1 + .../golang.org/x/sys/unix/syscall_freebsd.go | 1 + .../x/sys/unix/syscall_freebsd_386.go | 9 +- .../x/sys/unix/syscall_freebsd_amd64.go | 9 +- .../x/sys/unix/syscall_freebsd_arm.go | 9 +- .../x/sys/unix/syscall_freebsd_arm64.go | 9 +- .../x/sys/unix/syscall_freebsd_riscv64.go | 9 +- vendor/golang.org/x/sys/unix/syscall_hurd.go | 22 + .../golang.org/x/sys/unix/syscall_hurd_386.go | 29 + vendor/golang.org/x/sys/unix/syscall_linux.go | 52 +- .../golang.org/x/sys/unix/syscall_netbsd.go | 15 + .../golang.org/x/sys/unix/syscall_openbsd.go | 1 + .../x/sys/unix/syscall_openbsd_libc.go | 4 +- .../golang.org/x/sys/unix/syscall_solaris.go | 1 + vendor/golang.org/x/sys/unix/syscall_unix.go | 57 +- vendor/golang.org/x/sys/unix/timestruct.go | 2 +- vendor/golang.org/x/sys/unix/xattr_bsd.go | 9 +- vendor/golang.org/x/sys/unix/zerrors_linux.go | 30 +- .../x/sys/unix/zerrors_linux_386.go | 1 + .../x/sys/unix/zerrors_linux_amd64.go | 1 + .../x/sys/unix/zerrors_linux_arm.go | 1 + .../x/sys/unix/zerrors_linux_arm64.go | 1 + .../x/sys/unix/zerrors_linux_loong64.go | 1 + .../x/sys/unix/zerrors_linux_mips.go | 1 + .../x/sys/unix/zerrors_linux_mips64.go | 1 + .../x/sys/unix/zerrors_linux_mips64le.go | 1 + .../x/sys/unix/zerrors_linux_mipsle.go | 1 + .../x/sys/unix/zerrors_linux_ppc.go | 1 + .../x/sys/unix/zerrors_linux_ppc64.go | 1 + .../x/sys/unix/zerrors_linux_ppc64le.go | 1 + .../x/sys/unix/zerrors_linux_riscv64.go | 1 + .../x/sys/unix/zerrors_linux_s390x.go | 1 + .../x/sys/unix/zerrors_linux_sparc64.go | 1 + .../x/sys/unix/zerrors_openbsd_386.go | 356 +++- .../x/sys/unix/zerrors_openbsd_amd64.go | 189 +- .../x/sys/unix/zerrors_openbsd_arm.go | 348 +++- .../x/sys/unix/zerrors_openbsd_arm64.go | 160 +- .../x/sys/unix/zerrors_openbsd_mips64.go | 95 +- .../x/sys/unix/zsyscall_dragonfly_amd64.go | 10 + .../x/sys/unix/zsyscall_freebsd_386.go | 10 + .../x/sys/unix/zsyscall_freebsd_amd64.go | 10 + .../x/sys/unix/zsyscall_freebsd_arm.go | 10 + .../x/sys/unix/zsyscall_freebsd_arm64.go | 10 + .../x/sys/unix/zsyscall_freebsd_riscv64.go | 10 + .../golang.org/x/sys/unix/zsyscall_linux.go | 11 + .../x/sys/unix/zsyscall_netbsd_386.go | 10 + .../x/sys/unix/zsyscall_netbsd_amd64.go | 10 + .../x/sys/unix/zsyscall_netbsd_arm.go | 10 + .../x/sys/unix/zsyscall_netbsd_arm64.go | 10 + .../x/sys/unix/zsyscall_openbsd_386.go | 14 + .../x/sys/unix/zsyscall_openbsd_386.s | 137 +- .../x/sys/unix/zsyscall_openbsd_amd64.go | 14 + .../x/sys/unix/zsyscall_openbsd_amd64.s | 137 +- .../x/sys/unix/zsyscall_openbsd_arm.go | 14 + .../x/sys/unix/zsyscall_openbsd_arm.s | 137 +- .../x/sys/unix/zsyscall_openbsd_arm64.go | 14 + .../x/sys/unix/zsyscall_openbsd_arm64.s | 137 +- .../x/sys/unix/zsyscall_openbsd_mips64.go | 812 ++++++-- .../x/sys/unix/zsyscall_openbsd_mips64.s | 669 +++++++ .../x/sys/unix/zsyscall_openbsd_ppc64.go | 14 + .../x/sys/unix/zsyscall_openbsd_ppc64.s | 6 + .../x/sys/unix/zsyscall_openbsd_riscv64.go | 14 + .../x/sys/unix/zsyscall_openbsd_riscv64.s | 137 +- .../x/sys/unix/zsyscall_solaris_amd64.go | 13 + .../x/sys/unix/zsysctl_openbsd_386.go | 51 +- .../x/sys/unix/zsysctl_openbsd_amd64.go | 17 +- .../x/sys/unix/zsysctl_openbsd_arm.go | 51 +- .../x/sys/unix/zsysctl_openbsd_arm64.go | 11 +- .../x/sys/unix/zsysctl_openbsd_mips64.go | 3 +- .../x/sys/unix/zsysnum_openbsd_mips64.go | 1 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 217 ++- .../x/sys/unix/ztypes_netbsd_386.go | 84 + .../x/sys/unix/ztypes_netbsd_amd64.go | 84 + 
.../x/sys/unix/ztypes_netbsd_arm.go | 84 + .../x/sys/unix/ztypes_netbsd_arm64.go | 84 + .../x/sys/unix/ztypes_openbsd_386.go | 97 +- .../x/sys/unix/ztypes_openbsd_amd64.go | 33 +- .../x/sys/unix/ztypes_openbsd_arm.go | 9 +- .../x/sys/unix/ztypes_openbsd_arm64.go | 9 +- .../x/sys/unix/ztypes_openbsd_mips64.go | 9 +- .../x/sys/windows/syscall_windows.go | 15 +- .../x/sys/windows/zsyscall_windows.go | 7 + vendor/golang.org/x/term/terminal.go | 1 - .../internal/language/compact/language.go | 2 +- .../x/text/internal/language/language.go | 2 +- vendor/golang.org/x/text/language/language.go | 2 +- .../golang.org/x/text/unicode/bidi/trieval.go | 12 - vendor/golang.org/x/time/rate/rate.go | 20 +- vendor/golang.org/x/time/rate/sometimes.go | 67 + vendor/golang.org/x/tools/LICENSE | 27 + vendor/golang.org/x/tools/PATENTS | 22 + .../x/tools/go/ast/inspector/inspector.go | 218 +++ .../x/tools/go/ast/inspector/typeof.go | 229 +++ .../x/tools/internal/typeparams/common.go | 179 ++ .../x/tools/internal/typeparams/coretype.go | 122 ++ .../internal/typeparams/enabled_go117.go | 12 + .../internal/typeparams/enabled_go118.go | 15 + .../x/tools/internal/typeparams/normalize.go | 218 +++ .../x/tools/internal/typeparams/termlist.go | 163 ++ .../internal/typeparams/typeparams_go117.go | 197 ++ .../internal/typeparams/typeparams_go118.go | 151 ++ .../x/tools/internal/typeparams/typeterm.go | 170 ++ .../google.golang.org/appengine/appengine.go | 18 +- .../appengine/appengine_vm.go | 10 +- .../appengine/internal/api.go | 128 +- .../appengine/internal/api_classic.go | 1 + .../appengine/internal/api_common.go | 20 +- .../appengine/internal/identity_classic.go | 1 + .../appengine/internal/identity_flex.go | 1 + .../appengine/internal/identity_vm.go | 3 +- .../appengine/internal/main.go | 1 + .../appengine/internal/main_vm.go | 1 + .../googleapis/rpc/status/status.pb.go | 10 +- .../grpc/attributes/attributes.go | 2 +- vendor/google.golang.org/grpc/backoff.go | 2 +- .../grpc/balancer/balancer.go | 25 +- .../grpc/balancer/base/balancer.go | 4 +- .../grpc/balancer/conn_state_evaluator.go | 12 +- .../grpc/balancer/roundrobin/roundrobin.go | 16 +- .../grpc/balancer_conn_wrappers.go | 71 +- .../grpc_binarylog_v1/binarylog.pb.go | 20 +- .../grpc/channelz/channelz.go | 2 +- vendor/google.golang.org/grpc/clientconn.go | 144 +- .../grpc/credentials/credentials.go | 20 +- .../google.golang.org/grpc/credentials/tls.go | 2 +- vendor/google.golang.org/grpc/dialoptions.go | 25 +- .../grpc/encoding/encoding.go | 5 +- .../grpc/grpclog/loggerv2.go | 2 +- .../grpc/internal/binarylog/binarylog.go | 20 +- .../grpc/internal/binarylog/env_config.go | 20 +- .../grpc/internal/binarylog/method_logger.go | 21 +- .../grpc/internal/channelz/types.go | 16 +- .../grpc/internal/envconfig/envconfig.go | 8 +- .../grpc/internal/envconfig/observability.go | 36 + .../grpc/internal/envconfig/xds.go | 7 +- .../grpc/internal/grpclog/grpclog.go | 2 +- .../grpc/internal/grpcrand/grpcrand.go | 7 + .../grpc/internal/grpcsync/oncefunc.go | 32 + .../grpc/internal/grpcutil/compressor.go | 47 + .../grpc/internal/grpcutil/method.go | 1 - .../grpc/internal/internal.go | 42 +- .../internal/resolver/dns/dns_resolver.go | 4 +- .../resolver/passthrough/passthrough.go | 9 +- .../grpc/internal/resolver/unix/unix.go | 9 +- .../internal/serviceconfig/serviceconfig.go | 8 +- .../grpc/internal/status/status.go | 10 + .../grpc/internal/transport/controlbuf.go | 50 +- .../grpc/internal/transport/handler_server.go | 53 +- .../grpc/internal/transport/http2_client.go | 238 ++- 
.../grpc/internal/transport/http2_server.go | 115 +- .../grpc/internal/transport/http_util.go | 23 +- .../grpc/internal/transport/transport.go | 16 +- .../grpc/metadata/metadata.go | 73 +- .../google.golang.org/grpc/picker_wrapper.go | 7 +- vendor/google.golang.org/grpc/pickfirst.go | 4 +- vendor/google.golang.org/grpc/preloader.go | 2 +- vendor/google.golang.org/grpc/regenerate.sh | 7 +- .../grpc/resolver/resolver.go | 14 +- vendor/google.golang.org/grpc/rpc_util.go | 39 +- vendor/google.golang.org/grpc/server.go | 224 ++- .../grpc/serviceconfig/serviceconfig.go | 2 +- .../google.golang.org/grpc/status/status.go | 12 +- vendor/google.golang.org/grpc/stream.go | 135 +- vendor/google.golang.org/grpc/tap/tap.go | 2 +- vendor/google.golang.org/grpc/version.go | 2 +- vendor/google.golang.org/grpc/vet.sh | 9 +- .../apimachinery/pkg/api/meta/errors.go | 35 +- .../apimachinery/pkg/apis/meta/v1/helpers.go | 2 +- .../pkg/apis/meta/v1/micro_time_proto.go | 10 +- .../apimachinery/pkg/labels/selector.go | 10 +- .../k8s.io/apimachinery/pkg/util/sets/byte.go | 148 +- .../k8s.io/apimachinery/pkg/util/sets/doc.go | 7 +- .../apimachinery/pkg/util/sets/empty.go | 4 +- .../k8s.io/apimachinery/pkg/util/sets/int.go | 148 +- .../apimachinery/pkg/util/sets/int32.go | 148 +- .../apimachinery/pkg/util/sets/int64.go | 148 +- .../apimachinery/pkg/util/sets/ordered.go | 53 + .../k8s.io/apimachinery/pkg/util/sets/set.go | 227 +++ .../apimachinery/pkg/util/sets/string.go | 148 +- .../k8s.io/klog/v2/internal/buffer/buffer.go | 40 +- .../klog/v2/internal/serialize/keyvalues.go | 179 +- vendor/k8s.io/klog/v2/k8s_references.go | 78 +- vendor/k8s.io/klog/v2/klog.go | 38 +- vendor/k8s.io/klog/v2/klogr.go | 12 +- .../k8s.io/kube-openapi/pkg/internal/flags.go | 20 + .../go-json-experiment/json/AUTHORS | 3 + .../go-json-experiment/json/CONTRIBUTORS | 3 + .../go-json-experiment/json/LICENSE | 27 + .../go-json-experiment/json/README.md | 321 ++++ .../go-json-experiment/json/arshal.go | 506 +++++ .../go-json-experiment/json/arshal_any.go | 219 +++ .../go-json-experiment/json/arshal_default.go | 1446 ++++++++++++++ .../go-json-experiment/json/arshal_funcs.go | 387 ++++ .../go-json-experiment/json/arshal_inlined.go | 186 ++ .../go-json-experiment/json/arshal_methods.go | 229 +++ .../go-json-experiment/json/arshal_time.go | 196 ++ .../go-json-experiment/json/decode.go | 1655 +++++++++++++++++ .../go-json-experiment/json/doc.go | 185 ++ .../go-json-experiment/json/encode.go | 1146 ++++++++++++ .../go-json-experiment/json/errors.go | 183 ++ .../go-json-experiment/json/fields.go | 509 +++++ .../go-json-experiment/json/fold.go | 56 + .../go-json-experiment/json/intern.go | 86 + .../go-json-experiment/json/pools.go | 150 ++ .../go-json-experiment/json/state.go | 747 ++++++++ .../go-json-experiment/json/token.go | 522 ++++++ .../go-json-experiment/json/value.go | 375 ++++ .../kube-openapi/pkg/validation/spec/fuzz.go | 502 +++++ .../pkg/validation/spec/gnostic.go | 6 +- .../pkg/validation/spec/header.go | 30 + .../kube-openapi/pkg/validation/spec/info.go | 48 + .../kube-openapi/pkg/validation/spec/items.go | 28 + .../pkg/validation/spec/operation.go | 24 + .../pkg/validation/spec/parameter.go | 67 +- .../pkg/validation/spec/path_item.go | 31 + .../kube-openapi/pkg/validation/spec/paths.go | 59 + .../pkg/validation/spec/response.go | 37 +- .../pkg/validation/spec/responses.go | 67 + .../pkg/validation/spec/schema.go | 41 + .../pkg/validation/spec/security_scheme.go | 18 + .../pkg/validation/spec/swagger.go | 107 ++ 
.../kube-openapi/pkg/validation/spec/tag.go | 23 + vendor/k8s.io/utils/net/ipfamily.go | 181 ++ vendor/k8s.io/utils/net/net.go | 126 +- vendor/k8s.io/utils/net/port.go | 18 +- vendor/k8s.io/utils/pointer/pointer.go | 20 + vendor/k8s.io/utils/trace/trace.go | 30 +- vendor/modules.txt | 131 +- .../internal/golang/encoding/json/decode.go | 5 +- .../internal/golang/encoding/json/encode.go | 37 +- .../internal/golang/encoding/json/fold.go | 5 +- .../internal/golang/encoding/json/scanner.go | 2 + .../internal/golang/encoding/json/stream.go | 1 - vendor/sigs.k8s.io/json/json.go | 28 +- 411 files changed, 33459 insertions(+), 5153 deletions(-) create mode 100644 vendor/cloud.google.com/go/compute/internal/version.go create mode 100644 vendor/cloud.google.com/go/compute/metadata/CHANGES.md create mode 100644 vendor/cloud.google.com/go/compute/metadata/LICENSE create mode 100644 vendor/cloud.google.com/go/compute/metadata/README.md create mode 100644 vendor/cloud.google.com/go/compute/metadata/tidyfix.go rename vendor/github.com/HewlettPackard/dws/api/v1alpha1/{systemconfiguration_types.go => system_configuration_types.go} (88%) create mode 100644 vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnf_port_manager_types.go create mode 100644 vendor/github.com/NearNodeFlash/nnf-sos/config/crd/bases/nnf.cray.hpe.com_nnfportmanagers.yaml create mode 100644 vendor/github.com/cespare/xxhash/v2/testall.sh create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s rename vendor/github.com/cespare/xxhash/v2/{xxhash_amd64.go => xxhash_asm.go} (73%) create mode 100644 vendor/github.com/go-task/slim-sprig/.editorconfig create mode 100644 vendor/github.com/go-task/slim-sprig/.gitattributes create mode 100644 vendor/github.com/go-task/slim-sprig/.gitignore create mode 100644 vendor/github.com/go-task/slim-sprig/CHANGELOG.md create mode 100644 vendor/github.com/go-task/slim-sprig/LICENSE.txt create mode 100644 vendor/github.com/go-task/slim-sprig/README.md create mode 100644 vendor/github.com/go-task/slim-sprig/Taskfile.yml create mode 100644 vendor/github.com/go-task/slim-sprig/crypto.go create mode 100644 vendor/github.com/go-task/slim-sprig/date.go create mode 100644 vendor/github.com/go-task/slim-sprig/defaults.go create mode 100644 vendor/github.com/go-task/slim-sprig/dict.go create mode 100644 vendor/github.com/go-task/slim-sprig/doc.go create mode 100644 vendor/github.com/go-task/slim-sprig/functions.go create mode 100644 vendor/github.com/go-task/slim-sprig/list.go create mode 100644 vendor/github.com/go-task/slim-sprig/network.go create mode 100644 vendor/github.com/go-task/slim-sprig/numeric.go create mode 100644 vendor/github.com/go-task/slim-sprig/reflect.go create mode 100644 vendor/github.com/go-task/slim-sprig/regex.go create mode 100644 vendor/github.com/go-task/slim-sprig/strings.go create mode 100644 vendor/github.com/go-task/slim-sprig/url.go create mode 100644 vendor/github.com/google/pprof/AUTHORS create mode 100644 vendor/github.com/google/pprof/CONTRIBUTORS create mode 100644 vendor/github.com/google/pprof/LICENSE create mode 100644 vendor/github.com/google/pprof/profile/encode.go create mode 100644 vendor/github.com/google/pprof/profile/filter.go create mode 100644 vendor/github.com/google/pprof/profile/index.go create mode 100644 vendor/github.com/google/pprof/profile/legacy_java_profile.go create mode 100644 vendor/github.com/google/pprof/profile/legacy_profile.go create mode 100644 vendor/github.com/google/pprof/profile/merge.go create mode 100644 
vendor/github.com/google/pprof/profile/profile.go create mode 100644 vendor/github.com/google/pprof/profile/proto.go create mode 100644 vendor/github.com/google/pprof/profile/prune.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo_cli_dependencies.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_exact_elements.go create mode 100644 vendor/golang.org/x/net/http2/hpack/static_table.go create mode 100644 vendor/golang.org/x/sys/cpu/endian_big.go create mode 100644 vendor/golang.org/x/sys/cpu/endian_little.go create mode 100644 vendor/golang.org/x/sys/cpu/parse.go create mode 100644 vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_hurd.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_hurd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s create mode 100644 vendor/golang.org/x/time/rate/sometimes.go create mode 100644 vendor/golang.org/x/tools/LICENSE create mode 100644 vendor/golang.org/x/tools/PATENTS create mode 100644 
vendor/golang.org/x/tools/go/ast/inspector/inspector.go create mode 100644 vendor/golang.org/x/tools/go/ast/inspector/typeof.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/common.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/coretype.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/normalize.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/termlist.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/typeterm.go create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/observability.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/compressor.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/ordered.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/set.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/flags.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/AUTHORS create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/CONTRIBUTORS create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/LICENSE create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/README.md create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_any.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_default.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_funcs.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_inlined.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_methods.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_time.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/decode.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/doc.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/encode.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/errors.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/fields.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/fold.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/intern.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/pools.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/state.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/token.go create mode 100644 
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/spec/fuzz.go
 create mode 100644 vendor/k8s.io/utils/net/ipfamily.go

diff --git a/go.mod b/go.mod
index 6b8acb40..3c878915 100644
--- a/go.mod
+++ b/go.mod
@@ -3,20 +3,20 @@ module github.com/NearNodeFlash/nnf-dm
 go 1.19
 
 require (
-	github.com/HewlettPackard/dws v0.0.0-20230207214424-016335e15075
+	github.com/HewlettPackard/dws v0.0.0-20230224185520-92a8916979a7
 	github.com/NearNodeFlash/lustre-fs-operator v0.0.0-20221101145649-697b21833988
-	github.com/NearNodeFlash/nnf-sos v0.0.0-20230222155613-59e3bc22638a
-	github.com/onsi/ginkgo/v2 v2.4.0
-	github.com/onsi/gomega v1.23.0
+	github.com/NearNodeFlash/nnf-sos v0.0.0-20230301192820-b2c4363218b9
+	github.com/onsi/ginkgo/v2 v2.8.4
+	github.com/onsi/gomega v1.27.1
 	github.com/prometheus/client_golang v1.13.0
 	github.com/takama/daemon v1.0.0
-	go.uber.org/zap v1.23.0
-	golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be
-	golang.org/x/sys v0.1.0
-	google.golang.org/grpc v1.49.0
+	go.uber.org/zap v1.24.0
+	golang.org/x/crypto v0.5.0
+	golang.org/x/sys v0.5.0
+	google.golang.org/grpc v1.52.1
 	google.golang.org/protobuf v1.28.1
 	k8s.io/api v0.25.2
-	k8s.io/apimachinery v0.25.2
+	k8s.io/apimachinery v0.26.1
 	k8s.io/client-go v0.25.2
 	k8s.io/component-base v0.25.2 // indirect
 	sigs.k8s.io/controller-runtime v0.13.0
@@ -29,9 +29,9 @@ require (
 	github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
 	github.com/Azure/go-autorest/logger v0.2.1 // indirect
 	github.com/Azure/go-autorest/tracing v0.6.0 // indirect
-	github.com/NearNodeFlash/nnf-ec v0.0.0-20230221180225-1dce5b1f270f // indirect
+	github.com/NearNodeFlash/nnf-ec v0.0.0-20230228220541-47eb7a4dbe07 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/cespare/xxhash/v2 v2.1.2 // indirect
+	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/fsnotify/fsnotify v1.5.4 // indirect
 	github.com/go-logr/logr v1.2.3 // indirect
@@ -60,52 +60,38 @@ require (
 	github.com/prometheus/procfs v0.8.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	go.uber.org/atomic v1.10.0 // indirect
-	go.uber.org/multierr v1.8.0 // indirect
-	golang.org/x/net v0.1.0 // indirect
-	golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 // indirect
-	golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0 // indirect
-	golang.org/x/term v0.1.0 // indirect
-	golang.org/x/text v0.4.0 // indirect
-	golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
+	go.uber.org/multierr v1.9.0 // indirect
+	golang.org/x/net v0.7.0 // indirect
+	golang.org/x/oauth2 v0.4.0 // indirect
+	golang.org/x/sync v0.1.0 // indirect
+	golang.org/x/term v0.5.0 // indirect
+	golang.org/x/text v0.7.0 // indirect
+	golang.org/x/time v0.3.0 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
-	google.golang.org/appengine v1.6.8-0.20220404170424-ca3835e9b4d6 // indirect
-	google.golang.org/genproto v0.0.0-20220929141241-1ce7b20da813 // indirect
+	google.golang.org/appengine v1.6.8-0.20221117013220-504804fb50de // indirect
+	google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	k8s.io/apiextensions-apiserver v0.25.2 // indirect
-	k8s.io/klog/v2 v2.80.1 // indirect
-	k8s.io/kube-openapi v0.0.0-20220928191237-829ce0c27909 // indirect
-	k8s.io/utils v0.0.0-20220922133306-665eaaec4324 // indirect
-	sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
+	k8s.io/klog/v2 v2.90.0 // indirect
+	k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
+	k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 // indirect
+	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
-	sigs.k8s.io/yaml v1.3.0 // indirect
+	sigs.k8s.io/yaml v1.3.0
 )
 
 require (
-	cloud.google.com/go/compute v1.10.0 // indirect
-	github.com/HewlettPackard/structex v1.0.2 // indirect
-	github.com/cespare/xxhash v1.1.0 // indirect
-	github.com/dgraph-io/badger/v3 v3.2103.1 // indirect
-	github.com/dgraph-io/ristretto v0.1.0 // indirect
-	github.com/dustin/go-humanize v1.0.0 // indirect
+	cloud.google.com/go/compute v1.15.1 // indirect
+	cloud.google.com/go/compute/metadata v0.2.3 // indirect
 	github.com/emicklei/go-restful/v3 v3.9.0 // indirect
 	github.com/evanphx/json-patch/v5 v5.6.0 // indirect
+	github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
 	github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
-	github.com/golang/glog v1.0.0 // indirect
-	github.com/golang/snappy v0.0.4 // indirect
-	github.com/google/flatbuffers v2.0.0+incompatible // indirect
-	github.com/gorilla/mux v1.8.0 // indirect
-	github.com/klauspost/compress v1.13.5 // indirect
-	github.com/mattn/go-isatty v0.0.14 // indirect
-	github.com/moby/sys/mountinfo v0.6.2 // indirect
-	github.com/pkg/term v1.1.0 // indirect
-	github.com/rs/cors v1.8.0 // indirect
-	github.com/senseyeio/duration v0.0.0-20180430131211-7c2a214ada46 // indirect
-	github.com/sigurn/crc8 v0.0.0-20160107002456-e55481d6f45c // indirect
-	github.com/sigurn/utils v0.0.0-20190728110027-e1fefb11a144 // indirect
-	github.com/sirupsen/logrus v1.9.0 // indirect
-	go.chromium.org/luci v0.0.0-20210915061045-7722a2154c29 // indirect
-	go.opencensus.io v0.23.0 // indirect
-	k8s.io/mount-utils v0.25.2 // indirect
+	github.com/google/pprof v0.0.0-20221103000818-d260c55eee4c // indirect
+	github.com/kr/pretty v0.3.0 // indirect
+	github.com/rogpeppe/go-internal v1.8.0 // indirect
+	github.com/stretchr/testify v1.8.1 // indirect
+	golang.org/x/tools v0.6.0 // indirect
 )
diff --git a/go.sum b/go.sum
index b6342ea3..55e55b3e 100644
--- a/go.sum
+++ b/go.sum
@@ -13,59 +13,27 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV
 cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
 cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
 cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
-cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
-cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
-cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
-cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
-cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
-cloud.google.com/go v0.82.0/go.mod h1:vlKccHJGuFBFufnAnuB08dfEH9Y3H7dzDzRECFdC2TA=
-cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
-cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
-cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
-cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
-cloud.google.com/go v0.92.1/go.mod h1:cMc7asehN84LBi1JGTHo4n8wuaGuNAZ7lR7b1YNJBrE=
-cloud.google.com/go v0.92.2/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
-cloud.google.com/go v0.92.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
-cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
-cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
 cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
 cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
 cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/bigquery v1.22.0/go.mod h1:8c8KM5gUHw1doiZR4FGIwTBEmXC/xyM3vqmcvTXpUgc=
-cloud.google.com/go/bigtable v1.10.1/go.mod h1:cyHeKlx6dcZCO0oSQucYdauseD8kIENGuDOJPKMCVg8=
-cloud.google.com/go/cloudtasks v0.1.0/go.mod h1:jw28nXttHWt/e2wWiTCvA+q76y0rFb8boKyHUu33/+M=
-cloud.google.com/go/compute v1.10.0 h1:aoLIYaA1fX3ywihqpBk2APQKOo20nXsp1GEZQbx5Jk4=
-cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU=
-cloud.google.com/go/datacatalog v0.1.0/go.mod h1:MI16U99JCHsfQJtEA4kIsGlWiaTljiRinWYu78at7ks=
+cloud.google.com/go/compute v1.15.1 h1:7UGq3QknM33pw5xATlpzeoomNxsacIVvTqTTvbfajmE=
+cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA=
+cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
+cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/datastore v1.5.0/go.mod h1:RGUNM0FFAVkYA94BLTxoXBgfIyY1Riq67TwaBXH0lwc=
-cloud.google.com/go/errorreporting v0.1.0/go.mod h1:cZSiBMvrnl0X13pD9DwKf9sQ8Eqy3EzHqkyKBZxiIrM=
-cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
-cloud.google.com/go/kms v0.1.0/go.mod h1:8Qp8PCAypHg4FdmlyW1QRAv09BGQ9Uzh7JnmIZxPk+c=
-cloud.google.com/go/kms v0.2.0/go.mod h1:CBIzEY1pzeYnGSN3fwc7OHerywU5LlL0hoHLtGIxW1A=
-cloud.google.com/go/logging v1.4.2/go.mod h1:jco9QZSx8HiVVqLJReq7z7bVdj0P1Jb9PDFs63T+axo=
-cloud.google.com/go/monitoring v0.2.0/go.mod h1:K/JoZWY3xszHf38AMkzZGx1n5eT1/57ilElGMpESsEE=
-cloud.google.com/go/profiler v0.1.0/go.mod h1:D7S7LV/zKbRWkOzYL1b5xytpqt8Ikd/v/yvf1/Tx2pQ=
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
 cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
 cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
 cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
-cloud.google.com/go/pubsub v1.17.0/go.mod h1:bBIeYx9ftf/hr7eoSUim6cRaOYZE/hHuigwdwLLByi8=
-cloud.google.com/go/secretmanager v0.1.0/go.mod h1:3nGKHvnzDUVit7U0S9KAKJ4aOsO1xtwRG+7ey5LK1bM=
-cloud.google.com/go/spanner v1.25.0/go.mod h1:kQUft3x355hzzaeFbObjsvkzZDgpDkesp3v75WBnI8w=
 cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
 cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
 cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
 cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-cloud.google.com/go/storage v1.16.1/go.mod h1:LaNorbty3ehnU3rEjXSNV/NRgQA0O8Y+uh6bPe5UOk4=
-cloud.google.com/go/trace v0.1.0/go.mod h1:wxEwsoeRVPbeSkt7ZC9nWCgmoKQRAoySN7XHW2AmI7g=
-contrib.go.opencensus.io/exporter/stackdriver v0.13.8/go.mod h1:huNtlWx75MwO7qMs0KrMxPZXzNNWebav1Sq/pm02JdQ=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
 github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
@@ -85,122 +53,62 @@ github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUM
 github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
-github.com/HewlettPackard/dws v0.0.0-20221027172242-11a787dffb4c h1:E8x4Ovw8gYYIKJ9qMjf6Qf4toNG9OW6VGtbkPFmIFXY=
-github.com/HewlettPackard/dws v0.0.0-20221027172242-11a787dffb4c/go.mod h1:OxTK/qLMVBktON5wHI6zGuKEsoT8iLpPtUX+y02/m10=
-github.com/HewlettPackard/dws v0.0.0-20230207214424-016335e15075 h1:847PiLepVQ10SoPQEc3OKdxWg6LXt70WXcCQj7ih3Bc=
-github.com/HewlettPackard/dws v0.0.0-20230207214424-016335e15075/go.mod h1:OxTK/qLMVBktON5wHI6zGuKEsoT8iLpPtUX+y02/m10=
-github.com/HewlettPackard/structex v1.0.2 h1:p2EH/p6zvUd5fSa0onudAUvLWdKsvQSRP0jGKeblA5E=
-github.com/HewlettPackard/structex v1.0.2/go.mod h1:3frC4RY/cPsP/4+N8rkxsNAGlQwHV+zDC7qvrN+N+rE=
-github.com/Masterminds/squirrel v1.5.0/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10=
-github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/HewlettPackard/dws v0.0.0-20230224185520-92a8916979a7 h1:wh2jAzQHY2WaT0eVYDtIo6hCbotQ5yUvIwe3s90Wob0=
+github.com/HewlettPackard/dws v0.0.0-20230224185520-92a8916979a7/go.mod h1:ut/okqv7uImSeheKFGZQe07U1d6J9lluvw2eecRqiWU=
 github.com/NearNodeFlash/lustre-fs-operator v0.0.0-20221101145649-697b21833988 h1:T2jooIfkWh80uHlGNEg2DIP3LqVRmshkQvcKkmZBt1c=
 github.com/NearNodeFlash/lustre-fs-operator v0.0.0-20221101145649-697b21833988/go.mod h1:j01cfKrQnsszCBkNGuwymvcAi1Rt7i5ZvzjKwUHBjyA=
-github.com/NearNodeFlash/nnf-ec v0.0.0-20220920142937-1080197e523b h1:+4x/yTuM6j1KzEmKZrBx9LmZEARJ/zc5mH+38yZnU3c=
-github.com/NearNodeFlash/nnf-ec v0.0.0-20220920142937-1080197e523b/go.mod h1:s6Gp5d88rhMFcqrkF/Ds8D+n6GMAv9iw004YhAL3Neo=
-github.com/NearNodeFlash/nnf-ec v0.0.0-20230221180225-1dce5b1f270f h1:Dr/JRcUA0N8ZKbUnXx/J6pHIPfVT7rrzFFFKyelRe8E=
-github.com/NearNodeFlash/nnf-ec v0.0.0-20230221180225-1dce5b1f270f/go.mod h1:s6Gp5d88rhMFcqrkF/Ds8D+n6GMAv9iw004YhAL3Neo=
-github.com/NearNodeFlash/nnf-sos v0.0.0-20221012202709-192706b48299 h1:y7Isj6LA6CFI/JPyeXOwt7ii09PTiVmkx99N09Zwjg4=
-github.com/NearNodeFlash/nnf-sos v0.0.0-20221012202709-192706b48299/go.mod h1:ytCSNDNgPEqNndL042/FeozWIVUixWOD/mmPqhIDYDY=
-github.com/NearNodeFlash/nnf-sos v0.0.0-20230222155613-59e3bc22638a h1:4GTC2q72uJow9C5JpAfh7xjGW1HwlFdEvt++ma51Kz4=
-github.com/NearNodeFlash/nnf-sos v0.0.0-20230222155613-59e3bc22638a/go.mod h1:VWtvXpr3ndLhFnxib7LNvHQb1xPp13vj6Kn0NGslNv0=
-github.com/NearNodeFlash/nnf-sos v0.0.0-20230227170353-585d71d73f83 h1:mfi2xf+BOnUHWvDna9VpMWEm1ZsLxX+lP3s0evj1cAg=
-github.com/NearNodeFlash/nnf-sos v0.0.0-20230227170353-585d71d73f83/go.mod h1:VWtvXpr3ndLhFnxib7LNvHQb1xPp13vj6Kn0NGslNv0=
-github.com/NearNodeFlash/nnf-sos v0.0.0-20230227230025-fa4f33456559 h1:UQwhTzdjCFFqj3S0ljraIJZ9oVB8VcHrJhy7H9M75I8=
-github.com/NearNodeFlash/nnf-sos v0.0.0-20230227230025-fa4f33456559/go.mod h1:VWtvXpr3ndLhFnxib7LNvHQb1xPp13vj6Kn0NGslNv0=
+github.com/NearNodeFlash/nnf-ec v0.0.0-20230228220541-47eb7a4dbe07 h1:e2HfGFA3rqAxhLbvfB6A5PeaLUYAsVEV5GyufvmPVys=
+github.com/NearNodeFlash/nnf-ec v0.0.0-20230228220541-47eb7a4dbe07/go.mod h1:ZeU6daZ/sVfzcN1DPmMNmiMwnVJy69bWs14l0u1Va4o=
+github.com/NearNodeFlash/nnf-sos v0.0.0-20230301192820-b2c4363218b9 h1:GcxYAU8zA1ddgE5uwWDJzOao6NKgKH0HNWkjaTioFPM=
+github.com/NearNodeFlash/nnf-sos v0.0.0-20230301192820-b2c4363218b9/go.mod h1:pt+nggMj394FTBPx8eQldEtYBk1YlP3NOBXkbSoum0E=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/VividCortex/mysqlerr v1.0.0/go.mod h1:xERx8E4tBhLvpjzdUyQiSfUxeMcATEQrflDAfXsqcAE=
-github.com/alecthomas/kong v0.2.17/go.mod h1:ka3VZ8GZNPXv9Ov+j4YNLkI8mTuhXyr/0ktSlqIydQQ=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
-github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/aws/aws-sdk-go v1.36.29/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.40.42/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
-github.com/bazelbuild/buildtools v0.0.0-20210911013817-37179d5767a1/go.mod h1:689QdV3hBP7Vo9dJMmzhoYIyo/9iMhEmHkJcnaPRCbo=
-github.com/bazelbuild/remote-apis v0.0.0-20210718193713-0ecef08215cf/go.mod h1:ry8Y6CkQqCVcYsjPOlLXDX2iRVjOnjogdNwhvHmRcz8=
-github.com/bazelbuild/remote-apis v0.0.0-20210812183132-3e816456ee28/go.mod h1:ry8Y6CkQqCVcYsjPOlLXDX2iRVjOnjogdNwhvHmRcz8=
-github.com/bazelbuild/remote-apis-sdks v0.0.0-20210909182119-af1232ee0d79/go.mod h1:50ZasLDlgIh6V79vGr/vS7K8LXZC9xjJFJX8QCrxNhE=
 github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
 github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
 github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
 github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/danjacques/gofslock v0.0.0-20200623023034-5d0bd0fa6ef0/go.mod h1:DC3JtzuG7kxMvJ6dZmf2ymjNyoXwgtklr7FN+Um2B0U=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgraph-io/badger/v3 v3.2103.1 h1:zaX53IRg7ycxVlkd5pYdCeFp1FynD6qBGQoQql3R3Hk=
-github.com/dgraph-io/badger/v3 v3.2103.1/go.mod h1:dULbq6ehJ5K0cGW/1TQ9iSfUk0gbSiToDWmWmTsJ53E=
-github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI=
-github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
-github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
 github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
 github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
 github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
 github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
 github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww=
 github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
 github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
 github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
 github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
-github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -208,12 +116,10 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
 github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
-github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
 github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
-github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
 github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
@@ -228,14 +134,10 @@ github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXym
 github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
 github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
-github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY=
-github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
 github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
-github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
@@ -243,9 +145,6 @@ github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw
 github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs=
 github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
-github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -258,8 +157,6 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
 github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
 github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
 github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
-github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
-github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -275,20 +172,10 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
 github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
 github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
 github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
-github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/gomodule/redigo v1.8.5/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
-github.com/google/flatbuffers v1.12.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
-github.com/google/flatbuffers v2.0.0+incompatible h1:dicJ2oXwypfwUGnB2/TYWYEKiuk9eYQlQO/AnOHl5mI=
-github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
 github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0=
 github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -298,11 +185,8 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -310,8 +194,6 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
@@ -319,159 +201,53 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
 github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210506205249-923b5ab0fc1a/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210804190019-f964ff605595/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210827144239-02619b876842/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20221103000818-d260c55eee4c h1:lvddKcYTQ545ADhBujtIJmqQrZBDsGo7XIMbAQe/sNY=
+github.com/google/pprof v0.0.0-20221103000818-d260c55eee4c/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/tink/go v1.6.1/go.mod h1:IGW53kTgag+st5yPhKKwJ6u2l+SSp5/v9XF7spovjlY=
-github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
 github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gopherjs/gopherjs v0.0.0-20210901121439-eee08aaf2717/go.mod h1:0RnbP5ioI0nqRf3R9iK3iQaUJgsn0htlZEHCMn8FSfw=
-github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA=
-github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
-github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
-github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
-github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
-github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
-github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
-github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
-github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
-github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
-github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
-github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoIospckxBxk6Q=
-github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M=
-github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
-github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
 github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
-github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/jordan-wright/email v4.0.1-0.20210109023952-943e75fe5223+incompatible/go.mod h1:1c7szIrayyPPB/987hsnvNzLushdWf4o/79s3P08L8A=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
 github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
 github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
-github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
-github.com/klauspost/compress v1.13.5 h1:9O69jUPDcsT9fEm74W92rZL9FQY7rCdaXVneq+yyzl4=
-github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
 github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o=
-github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw=
-github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
-github.com/luci/gtreap v0.0.0-20161228054646-35df89791e8f/go.mod h1:OjKOY0UvVOOH5nWXSIWTbQWESn8dDiGlaEZx6IAsWhU=
-github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/maruel/subcommands v1.1.0/go.mod h1:b25AG9Eho2Rs1NUPAPAYBFy1B5y63QMxw/2WmLGO8m8=
-github.com/maruel/ut v1.0.2/go.mod h1:RV8PwPD9dd2KFlnlCc/DB2JVvkXmyaalfc5xvmSrRSs=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
-github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
-github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
-github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
-github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/mattn/go-tty v0.0.3/go.mod h1:ihxohKRERHTVzN+aSVRwACLCeqIoZAWpoICkkvrWyR0=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM=
 github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
-github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
-github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
-github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
-github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
-github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
-github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
-github.com/moby/sys/mountinfo v0.6.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
-github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
-github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -479,53 +255,24 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
 github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/mostynb/zstdpool-syncpool v0.0.7/go.mod h1:YpzqIpN8xvRZZvemem7CMLPWkjuaKR37MnkQruSj6aw=
-github.com/mostynb/zstdpool-syncpool v0.0.10/go.mod h1:BmhpjzZxG8KCduFi0N/Do6j9w+JYt6vR2cM8J9AwujI=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
-github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
-github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
 github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
-github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
-github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
 github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
-github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
-github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
-github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs=
-github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo=
-github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
-github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
-github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys=
-github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
-github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
-github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
-github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/onsi/ginkgo/v2 v2.8.4 h1:gf5mIQ8cLFieruNLAdgijHF1PYfLphKm2dxxcUtcqK0=
+github.com/onsi/ginkgo/v2 v2.8.4/go.mod h1:427dEDQZkDKsBvCjc2A/ZPefhKxsTTrsQegMlayL730=
+github.com/onsi/gomega v1.27.1 h1:rfztXRbg6nv/5f+Raen9RcGoSecHIFgBBLQK3Wdj754=
+github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw=
 github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/term v1.1.0 h1:xIAAdCMh3QIAy+5FrE8Ad8XoDhEU4ufwbaSozViP9kk=
-github.com/pkg/term v1.1.0/go.mod h1:E25nymQcrSllhX42Ok8MRm1+hyBdHY0dCeiKZ9jpNGw=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
@@ -537,8 +284,6 @@ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
@@ -546,141 +291,74 @@ github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+
 github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
 github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
 github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/protocolbuffers/txtpbfmt v0.0.0-20210910155415-af4447816e4d/go.mod h1:KbKfKPy2I6ecOIGA9apfheFv14+P3RSmmQvshofQyMY=
 github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
 github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
 github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
-github.com/rs/cors v1.8.0 h1:P2KMzcFwrPoSjkF1WLRPsp3UMLyql8L4v9hQpVeK5so=
-github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM=
-github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/senseyeio/duration v0.0.0-20180430131211-7c2a214ada46 h1:Dz0HrI1AtNSGCE8LXLLqoZU4iuOJXPWndenCsZfstA8=
-github.com/senseyeio/duration v0.0.0-20180430131211-7c2a214ada46/go.mod h1:is8FVkzSi7PYLWEXT5MgWhglFsyyiW8ffxAoJqfuFZo=
-github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
-github.com/shurcooL/go v0.0.0-20200502201357-93f07166e636/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
-github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
-github.com/sigurn/crc8 v0.0.0-20160107002456-e55481d6f45c h1:hk0Jigjfq59yDMgd6bzi22Das5tyxU0CtOkh7a9io84=
-github.com/sigurn/crc8 v0.0.0-20160107002456-e55481d6f45c/go.mod h1:cyrWuItcOVIGX6fBZ/G00z4ykprWM7hH58fSavNkjRg=
-github.com/sigurn/utils v0.0.0-20190728110027-e1fefb11a144 h1:ccb8W1+mYuZvlpn/mJUMAbsFHTMCpcJBS78AsBQxNcY=
-github.com/sigurn/utils v0.0.0-20190728110027-e1fefb11a144/go.mod h1:VRI4lXkrUH5Cygl6mbG1BRUfMMoT2o8BkrtBDUAm+GU=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
-github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
-github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
 github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
-github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
-github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
 github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
-github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/takama/daemon v1.0.0 h1:XS3VLnFKmqw2Z7fQ/dHRarrVjdir9G3z7BEP8osjizQ=
 github.com/takama/daemon v1.0.0/go.mod h1:gKlhcjbqtBODg5v9H1nj5dU1a2j2GemtuWSNLD5rxOE=
-github.com/texttheater/golang-levenshtein v1.0.1/go.mod h1:PYAKrbF5sAiq9wd+H82hs7gNaen0CplQ9uvm6+enD/8=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
-github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
-github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
 github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
 github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
 github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
-github.com/xtgo/set v1.0.0/go.mod h1:d3NHzGzSa0NmB2NhFyECA+QdRp29oEn2xbT+TpeFoM8=
-github.com/yosuke-furukawa/json5 v0.1.1/go.mod h1:sw49aWDqNdRJ6DYUtIQiaA3xyj2IL9tjeNYmX2ixwcU=
 github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-go.chromium.org/luci v0.0.0-20210915061045-7722a2154c29 h1:TgvWIQO0I0E7pKVLq/ARDhT2p2kHGKLtZhAN0EtUxiI=
-go.chromium.org/luci v0.0.0-20210915061045-7722a2154c29/go.mod h1:gk1qa1CBZAdya1PqugRJjkwk8LIymAoB5wf5UA7NrFk=
-go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
 go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
 go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
-go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
-go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
 go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.starlark.net v0.0.0-20210223155950-e043a3d3c984/go.mod h1:t3mmBBPzAVvK0L0n1drDmrQsJ8FoIx4INCqVMTr/Zo0=
-go.starlark.net v0.0.0-20210901212718-87f333178d59/go.mod h1:t3mmBBPzAVvK0L0n1drDmrQsJ8FoIx4INCqVMTr/Zo0=
-go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic
v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= -go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= +go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= -go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be h1:fmw3UbQh+nxngCAHrDCCztao/kbYFnWjoqop8dHx05A= -golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= +golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -703,8 +381,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod 
h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -713,18 +389,9 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -746,52 +413,29 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net 
v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210505214959-0714010a04ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210126194326-f9ce19ea3013/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= 
-golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 h1:lxqLZaMad/dJHMFZH0NiNpiEZI/nhgWhe4wgzpE+MuA= -golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M= +golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -802,22 +446,13 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0 h1:cu5kTvlzcw1Q5S9f5ip1/cpiB4nXvw1XYzFPGgzLUOY= -golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -825,18 +460,11 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -852,78 +480,44 @@ golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200722175500-76b94024e4b6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210507014357-30e306a8bba5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210601080250-7ecdf8ef093b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220922220347-f3bd1da661af h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y= -golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -934,7 +528,6 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -961,21 +554,9 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= 
-golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -998,33 +579,16 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.37.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.8-0.20220404170424-ca3835e9b4d6 h1:+EhUdjOKPdPmUZ3fBm0hCLvhFM+2gfhgUCOqzzzjUDw= -google.golang.org/appengine v1.6.8-0.20220404170424-ca3835e9b4d6/go.mod h1:BbwiCY3WCmCUKOJTrX5NwgQzew1c32w3kxa6Sxvs0cQ= +google.golang.org/appengine v1.6.8-0.20221117013220-504804fb50de h1:MvEeYmzkzk0Rsw+ceqy28aIJN7Mum+4aYqBwCMqYNug= +google.golang.org/appengine v1.6.8-0.20221117013220-504804fb50de/go.mod h1:BbwiCY3WCmCUKOJTrX5NwgQzew1c32w3kxa6Sxvs0cQ= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto 
v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1053,49 +617,12 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210506142907-4a47615972c2/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210517163617-5e0236093d7a/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210601144548-a796c710e9b6/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto 
v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210824181836-a4879c3d0e89/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210825212027-de86158e7fda/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210827211047-25e5f791fe06/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210830153122-0bac4d21c8ea/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220929141241-1ce7b20da813 h1:buul04Ikd79A5tP8nGhKEyMfr+/HplsO6nqSUapWZ/M= -google.golang.org/genproto v0.0.0-20220929141241-1ce7b20da813/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2 h1:O97sLx/Xmb/KIZHB/2/BzofxBs5QmmR0LcihPtllmbc= +google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= @@ -1105,22 +632,11 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod 
h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw= -google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc v1.52.1 h1:2NpOPk5g5Xtb0qebIEs7hNIa++PdtZLo2AQUpc1YnSU= +google.golang.org/grpc v1.52.1/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1137,24 +653,14 @@ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= -gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 
v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1181,31 +687,25 @@ k8s.io/api v0.25.2 h1:v6G8RyFcwf0HR5jQGIAYlvtRNrxMJQG1xJzaSeVnIS8= k8s.io/api v0.25.2/go.mod h1:qP1Rn4sCVFwx/xIhe+we2cwBLTXNcheRyYXwajonhy0= k8s.io/apiextensions-apiserver v0.25.2 h1:8uOQX17RE7XL02ngtnh3TgifY7EhekpK+/piwzQNnBo= k8s.io/apiextensions-apiserver v0.25.2/go.mod h1:iRwwRDlWPfaHhuBfQ0WMa5skdQfrE18QXJaJvIDLvE8= -k8s.io/apimachinery v0.25.2 h1:WbxfAjCx+AeN8Ilp9joWnyJ6xu9OMeS/fsfjK/5zaQs= -k8s.io/apimachinery v0.25.2/go.mod h1:hqqA1X0bsgsxI6dXsJ4HnNTBOmJNxyPp8dw3u2fSHwA= +k8s.io/apimachinery v0.26.1 h1:8EZ/eGJL+hY/MYCNwhmDzVqq2lPl3N3Bo8rvweJwXUQ= +k8s.io/apimachinery v0.26.1/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74= k8s.io/client-go v0.25.2 h1:SUPp9p5CwM0yXGQrwYurw9LWz+YtMwhWd0GqOsSiefo= k8s.io/client-go v0.25.2/go.mod h1:i7cNU7N+yGQmJkewcRD2+Vuj4iz7b30kI8OcL3horQ4= k8s.io/component-base v0.25.2 h1:Nve/ZyHLUBHz1rqwkjXm/Re6IniNa5k7KgzxZpTfSQY= k8s.io/component-base v0.25.2/go.mod h1:90W21YMr+Yjg7MX+DohmZLzjsBtaxQDDwaX4YxDkl60= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= -k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20220928191237-829ce0c27909 h1:q/70bz7C1/LGuQu/JBX7Fpi55CwcCts/wbvlehe0RRo= -k8s.io/kube-openapi v0.0.0-20220928191237-829ce0c27909/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= -k8s.io/mount-utils v0.25.0/go.mod h1:WTYq8Ev/JrnkqK2h1jFUnC8qWGuqzMb9XDC+Lu3WNU0= -k8s.io/mount-utils v0.25.2 h1:ZNIHSaPaaPqFqtZzaTbgH3oHKoWZHoNBnTKafQRj6A4= -k8s.io/mount-utils v0.25.2/go.mod h1:WTYq8Ev/JrnkqK2h1jFUnC8qWGuqzMb9XDC+Lu3WNU0= -k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20220922133306-665eaaec4324 h1:i+xdFemcSNuJvIfBlaYuXgRondKxK4z4prVPKzEaelI= -k8s.io/utils v0.0.0-20220922133306-665eaaec4324/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/klog/v2 v2.90.0 h1:VkTxIV/FjRXn1fgNNcKGM8cfmL1Z33ZjXRTVxKCoF5M= +k8s.io/klog/v2 v2.90.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= +k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= +k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 h1:kmDqav+P+/5e1i9tFfHq1qcF3sOrDp+YEkVDAHu7Jwk= +k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/controller-runtime v0.13.0 h1:iqa5RNciy7ADWnIc8QxCbOX5FEKVR3uxVxKHRMc2WIQ= sigs.k8s.io/controller-runtime v0.13.0/go.mod h1:Zbz+el8Yg31jubvAEyglRZGdLAjplZl+PgtYNI6WNTI= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 
h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go new file mode 100644 index 00000000..7dd6a0aa --- /dev/null +++ b/vendor/cloud.google.com/go/compute/internal/version.go @@ -0,0 +1,18 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +// Version is the current tagged release of the library. +const Version = "1.15.1" diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md new file mode 100644 index 00000000..06b95734 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -0,0 +1,19 @@ +# Changes + +## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.2...compute/metadata/v0.2.3) (2022-12-15) + + +### Bug Fixes + +* **compute/metadata:** Switch DNS lookup to an absolute lookup ([119b410](https://github.com/googleapis/google-cloud-go/commit/119b41060c7895e45e48aee5621ad35607c4d021)), refs [#7165](https://github.com/googleapis/google-cloud-go/issues/7165) + +## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.1...compute/metadata/v0.2.2) (2022-12-01) + + +### Bug Fixes + +* **compute/metadata:** Set IdleConnTimeout for http.Client ([#7084](https://github.com/googleapis/google-cloud-go/issues/7084)) ([766516a](https://github.com/googleapis/google-cloud-go/commit/766516aaf3816bfb3159efeea65aa3d1d205a3e2)), refs [#5430](https://github.com/googleapis/google-cloud-go/issues/5430) + +## [0.1.0] (2022-10-26) + +Initial release of metadata being it's own module. diff --git a/vendor/cloud.google.com/go/compute/metadata/LICENSE b/vendor/cloud.google.com/go/compute/metadata/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/cloud.google.com/go/compute/metadata/README.md b/vendor/cloud.google.com/go/compute/metadata/README.md new file mode 100644 index 00000000..f940fb2c --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/README.md @@ -0,0 +1,27 @@ +# Compute API + +[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/compute.svg)](https://pkg.go.dev/cloud.google.com/go/compute/metadata) + +This is a utility library for communicating with Google Cloud metadata service +on Google Cloud. + +## Install + +```bash +go get cloud.google.com/go/compute/metadata +``` + +## Go Version Support + +See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported) +section in the root directory's README. + +## Contributing + +Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) +document for details. + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. See +[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) +for more information. diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 50538b1d..c17faa14 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -70,6 +70,7 @@ func newDefaultHTTPClient() *http.Client { Timeout: 2 * time.Second, KeepAlive: 30 * time.Second, }).Dial, + IdleConnTimeout: 60 * time.Second, }, Timeout: 5 * time.Second, } @@ -146,7 +147,7 @@ func testOnGCE() bool { go func() { resolver := &net.Resolver{} - addrs, err := resolver.LookupHost(ctx, "metadata.google.internal") + addrs, err := resolver.LookupHost(ctx, "metadata.google.internal.") if err != nil || len(addrs) == 0 { resc <- false return diff --git a/vendor/cloud.google.com/go/compute/metadata/tidyfix.go b/vendor/cloud.google.com/go/compute/metadata/tidyfix.go new file mode 100644 index 00000000..4cef4850 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/tidyfix.go @@ -0,0 +1,23 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the {{.RootMod}} import, won't actually become part of +// the resultant binary. +//go:build modhack +// +build modhack + +package metadata + +// Necessary for safely adding multi-module repo. 
See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "cloud.google.com/go/compute/internal" diff --git a/vendor/github.com/HewlettPackard/dws/api/v1alpha1/systemconfiguration_types.go b/vendor/github.com/HewlettPackard/dws/api/v1alpha1/system_configuration_types.go similarity index 88% rename from vendor/github.com/HewlettPackard/dws/api/v1alpha1/systemconfiguration_types.go rename to vendor/github.com/HewlettPackard/dws/api/v1alpha1/system_configuration_types.go index 09e7c53f..2605665d 100644 --- a/vendor/github.com/HewlettPackard/dws/api/v1alpha1/systemconfiguration_types.go +++ b/vendor/github.com/HewlettPackard/dws/api/v1alpha1/system_configuration_types.go @@ -21,6 +21,7 @@ package v1alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" ) // SystemConfigurationComputeNode describes a compute node in the system @@ -59,6 +60,12 @@ type SystemConfigurationSpec struct { // StorageNodes is the list of storage nodes on the system StorageNodes []SystemConfigurationStorageNode `json:"storageNodes,omitempty"` + + // Ports is the list of ports available for communication between nodes in the system. + // Valid values are single integers, or a range of values of the form "START-END" where + // START is an integer value that represents the start of a port range and END is an + // integer value that represents the end of the port range (inclusive). + Ports []intstr.IntOrString `json:"ports,omitempty"` } // SystemConfigurationStatus defines the status of SystemConfiguration diff --git a/vendor/github.com/HewlettPackard/dws/api/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/HewlettPackard/dws/api/v1alpha1/zz_generated.deepcopy.go index a91fa935..3055cd1c 100644 --- a/vendor/github.com/HewlettPackard/dws/api/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/HewlettPackard/dws/api/v1alpha1/zz_generated.deepcopy.go @@ -28,6 +28,7 @@ import ( "github.com/HewlettPackard/dws/utils/dwdparse" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -1202,6 +1203,11 @@ func (in *SystemConfigurationSpec) DeepCopyInto(out *SystemConfigurationSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]intstr.IntOrString, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemConfigurationSpec. diff --git a/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnf_port_manager_types.go b/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnf_port_manager_types.go new file mode 100644 index 00000000..2b987799 --- /dev/null +++ b/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/nnf_port_manager_types.go @@ -0,0 +1,136 @@ +/* + * Copyright 2023 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
+ *
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package v1alpha1
+
+import (
+	"github.com/HewlettPackard/dws/utils/updater"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
+// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
+
+// NnfPortManagerAllocationSpec defines the desired state for a single port allocation
+type NnfPortManagerAllocationSpec struct {
+	// Requester is an object reference to the requester of the ports.
+	Requester corev1.ObjectReference `json:"requester"`
+
+	// Count is the number of desired ports the requester needs. The port manager
+	// will attempt to allocate this many ports.
+	// +kubebuilder:default:=1
+	Count int `json:"count"`
+}
+
+// NnfPortManagerSpec defines the desired state of NnfPortManager
+type NnfPortManagerSpec struct {
+	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
+	// Important: Run "make" to regenerate code after modifying this file
+
+	// SystemConfiguration is an object reference to the system configuration. The
+	// Port Manager will use the available ports defined in the system configuration.
+	SystemConfiguration corev1.ObjectReference `json:"systemConfiguration"`
+
+	// Allocations is a list of allocation requests that the Port Manager will attempt
+	// to satisfy. To request port resources from the port manager, clients should add
+	// an entry to the allocations. Entries must be unique. The port manager controller
+	// will attempt to allocate port resources for each allocation specification in the
+	// list. To remove an allocation and free up port resources, remove the allocation
+	// from the list.
+	Allocations []NnfPortManagerAllocationSpec `json:"allocations"`
+}
+
+// NnfPortManagerAllocationStatusStatus is the current status of a port requester. A port that is in use by the respective owner
+// will have a status of "InUse". A port that is freed by the owner but not yet reclaimed by the port manager
+// will have a status of "Free". Any other status value indicates a failure of the port allocation.
+// +kubebuilder:validation:Enum:=InUse;Free;InvalidConfiguration;InsufficientResources
+type NnfPortManagerAllocationStatusStatus string
+
+const (
+	NnfPortManagerAllocationStatusInUse                 NnfPortManagerAllocationStatusStatus = "InUse"
+	NnfPortManagerAllocationStatusFree                  NnfPortManagerAllocationStatusStatus = "Free"
+	NnfPortManagerAllocationStatusInvalidConfiguration  NnfPortManagerAllocationStatusStatus = "InvalidConfiguration"
+	NnfPortManagerAllocationStatusInsufficientResources NnfPortManagerAllocationStatusStatus = "InsufficientResources"
+	// NOTE: You must ensure any new value is added to the above kubebuilder validation enum
+)
+
+// NnfPortManagerAllocationStatus defines the allocation status of a port for a given requester.
+type NnfPortManagerAllocationStatus struct {
+	// Requester is an object reference to the requester of the port resource, if one exists, or
+	// empty otherwise.
+	Requester *corev1.ObjectReference `json:"requester,omitempty"`
+
+	// Ports is the list of ports allocated to the owning resource.
+	Ports []uint16 `json:"ports,omitempty"`
+
+	// Status is the ownership status of the port.
+	Status NnfPortManagerAllocationStatusStatus `json:"status"`
+}
+
+// NnfPortManagerStatusStatus is the current status of the port manager.
+// +kubebuilder:validation:Enum:=Ready;SystemConfigurationNotFound
+type NnfPortManagerStatusStatus string
+
+const (
+	NnfPortManagerStatusReady                       NnfPortManagerStatusStatus = "Ready"
+	NnfPortManagerStatusSystemConfigurationNotFound NnfPortManagerStatusStatus = "SystemConfigurationNotFound"
+	// NOTE: You must ensure any new value is added to the above kubebuilder validation enum
+)
+
+// NnfPortManagerStatus defines the observed state of NnfPortManager
+type NnfPortManagerStatus struct {
+	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
+	// Important: Run "make" to regenerate code after modifying this file
+
+	// Allocations is a list of port allocation statuses.
+	Allocations []NnfPortManagerAllocationStatus `json:"allocations,omitempty"`
+
+	// Status is the current status of the port manager.
+	Status NnfPortManagerStatusStatus `json:"status"`
+}
+
+//+kubebuilder:object:root=true
+//+kubebuilder:subresource:status
+
+// NnfPortManager is the Schema for the nnfportmanagers API
+type NnfPortManager struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   NnfPortManagerSpec   `json:"spec,omitempty"`
+	Status NnfPortManagerStatus `json:"status,omitempty"`
+}
+
+func (mgr *NnfPortManager) GetStatus() updater.Status[*NnfPortManagerStatus] {
+	return &mgr.Status
+}
+
+//+kubebuilder:object:root=true
+
+// NnfPortManagerList contains a list of NnfPortManager
+type NnfPortManagerList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []NnfPortManager `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&NnfPortManager{}, &NnfPortManagerList{})
+}
diff --git a/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/zz_generated.deepcopy.go
index 49df07a3..69ff0d66 100644
--- a/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/zz_generated.deepcopy.go
+++ b/vendor/github.com/NearNodeFlash/nnf-sos/api/v1alpha1/zz_generated.deepcopy.go
@@ -25,6 +25,7 @@ package v1alpha1

 import (
+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 )
@@ -817,6 +818,149 @@ func (in *NnfNodeStorageStatus) DeepCopy() *NnfNodeStorageStatus {
 	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NnfPortManager) DeepCopyInto(out *NnfPortManager) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfPortManager.
+func (in *NnfPortManager) DeepCopy() *NnfPortManager {
+	if in == nil {
+		return nil
+	}
+	out := new(NnfPortManager)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NnfPortManager) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfPortManagerAllocationSpec) DeepCopyInto(out *NnfPortManagerAllocationSpec) { + *out = *in + out.Requester = in.Requester +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfPortManagerAllocationSpec. +func (in *NnfPortManagerAllocationSpec) DeepCopy() *NnfPortManagerAllocationSpec { + if in == nil { + return nil + } + out := new(NnfPortManagerAllocationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfPortManagerAllocationStatus) DeepCopyInto(out *NnfPortManagerAllocationStatus) { + *out = *in + if in.Requester != nil { + in, out := &in.Requester, &out.Requester + *out = new(corev1.ObjectReference) + **out = **in + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]uint16, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfPortManagerAllocationStatus. +func (in *NnfPortManagerAllocationStatus) DeepCopy() *NnfPortManagerAllocationStatus { + if in == nil { + return nil + } + out := new(NnfPortManagerAllocationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfPortManagerList) DeepCopyInto(out *NnfPortManagerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfPortManager, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfPortManagerList. +func (in *NnfPortManagerList) DeepCopy() *NnfPortManagerList { + if in == nil { + return nil + } + out := new(NnfPortManagerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfPortManagerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfPortManagerSpec) DeepCopyInto(out *NnfPortManagerSpec) { + *out = *in + out.SystemConfiguration = in.SystemConfiguration + if in.Allocations != nil { + in, out := &in.Allocations, &out.Allocations + *out = make([]NnfPortManagerAllocationSpec, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfPortManagerSpec. +func (in *NnfPortManagerSpec) DeepCopy() *NnfPortManagerSpec { + if in == nil { + return nil + } + out := new(NnfPortManagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NnfPortManagerStatus) DeepCopyInto(out *NnfPortManagerStatus) {
+	*out = *in
+	if in.Allocations != nil {
+		in, out := &in.Allocations, &out.Allocations
+		*out = make([]NnfPortManagerAllocationStatus, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfPortManagerStatus.
+func (in *NnfPortManagerStatus) DeepCopy() *NnfPortManagerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(NnfPortManagerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *NnfResourceStatus) DeepCopyInto(out *NnfResourceStatus) {
 	*out = *in
diff --git a/vendor/github.com/NearNodeFlash/nnf-sos/config/crd/bases/nnf.cray.hpe.com_nnfportmanagers.yaml b/vendor/github.com/NearNodeFlash/nnf-sos/config/crd/bases/nnf.cray.hpe.com_nnfportmanagers.yaml
new file mode 100644
index 00000000..83f47e65
--- /dev/null
+++ b/vendor/github.com/NearNodeFlash/nnf-sos/config/crd/bases/nnf.cray.hpe.com_nnfportmanagers.yaml
@@ -0,0 +1,222 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.9.2
+  creationTimestamp: null
+  name: nnfportmanagers.nnf.cray.hpe.com
+spec:
+  group: nnf.cray.hpe.com
+  names:
+    kind: NnfPortManager
+    listKind: NnfPortManagerList
+    plural: nnfportmanagers
+    singular: nnfportmanager
+  scope: Namespaced
+  versions:
+  - name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        description: NnfPortManager is the Schema for the nnfportmanagers API
+        properties:
+          apiVersion:
+            description: 'APIVersion defines the versioned schema of this representation
+              of an object. Servers should convert recognized schemas to the latest
+              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+            type: string
+          kind:
+            description: 'Kind is a string value representing the REST resource this
+              object represents. Servers may infer this from the endpoint the client
+              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: NnfPortManagerSpec defines the desired state of NnfPortManager
+            properties:
+              allocations:
+                description: Allocations is a list of allocation requests that the
+                  Port Manager will attempt to satisfy. To request port resources
+                  from the port manager, clients should add an entry to the allocations.
+                  Entries must be unique. The port manager controller will attempt
+                  to allocate port resources for each allocation specification in
+                  the list. To remove an allocation and free up port resources, remove
+                  the allocation from the list.
+                items:
+                  description: NnfPortManagerAllocationSpec defines the desired state
+                    for a single port allocation
+                  properties:
+                    count:
+                      default: 1
+                      description: Count is the number of desired ports the requester
+                        needs. The port manager will attempt to allocate this many
+                        ports.
+                      type: integer
+                    requester:
+                      description: Requester is an object reference to the requester
+                        of the ports.
+                      properties:
+                        apiVersion:
+                          description: API version of the referent.
+ type: string + fieldPath: + description: 'If referring to a piece of an object instead + of an entire object, this string should contain a valid + JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container + within a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that + triggered the event) or if no container name is specified + "spec.containers[2]" (container with index 2 in this pod). + This syntax is chosen only to have some well-defined way + of referencing a part of an object. TODO: this design + is not final and this field is subject to change in the + future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + required: + - count + - requester + type: object + type: array + systemConfiguration: + description: SystemConfiguration is an object reference to the system + configuration. The Port Manager will use the available ports defined + in the system configuration. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
+                    type: string
+                type: object
+                x-kubernetes-map-type: atomic
+            required:
+            - allocations
+            - systemConfiguration
+            type: object
+          status:
+            description: NnfPortManagerStatus defines the observed state of NnfPortManager
+            properties:
+              allocations:
+                description: Allocations is a list of port allocation statuses.
+                items:
+                  description: NnfPortManagerAllocationStatus defines the allocation
+                    status of a port for a given requester.
+                  properties:
+                    ports:
+                      description: Ports is the list of ports allocated to the owning
+                        resource.
+                      items:
+                        type: integer
+                      type: array
+                    requester:
+                      description: Requester is an object reference to the requester
+                        of the port resource, if one exists, or empty otherwise.
+                      properties:
+                        apiVersion:
+                          description: API version of the referent.
+                          type: string
+                        fieldPath:
+                          description: 'If referring to a piece of an object instead
+                            of an entire object, this string should contain a valid
+                            JSON/Go field access statement, such as desiredState.manifest.containers[2].
+                            For example, if the object reference is to a container
+                            within a pod, this would take on a value like: "spec.containers{name}"
+                            (where "name" refers to the name of the container that
+                            triggered the event) or if no container name is specified
+                            "spec.containers[2]" (container with index 2 in this pod).
+                            This syntax is chosen only to have some well-defined way
+                            of referencing a part of an object. TODO: this design
+                            is not final and this field is subject to change in the
+                            future.'
+                          type: string
+                        kind:
+                          description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+                          type: string
+                        name:
+                          description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+                          type: string
+                        namespace:
+                          description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
+                          type: string
+                        resourceVersion:
+                          description: 'Specific resourceVersion to which this reference
+                            is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
+                          type: string
+                        uid:
+                          description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
+                          type: string
+                      type: object
+                      x-kubernetes-map-type: atomic
+                    status:
+                      description: Status is the ownership status of the port.
+                      enum:
+                      - InUse
+                      - Free
+                      - InvalidConfiguration
+                      - InsufficientResources
+                      type: string
+                  required:
+                  - status
+                  type: object
+                type: array
+              status:
+                description: Status is the current status of the port manager.
+ enum: + - Ready + - SystemConfigurationNotFound + type: string + required: + - status + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md index 792b4a60..8bf0e5b7 100644 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -3,8 +3,7 @@ [![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2) [![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml) -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a high-quality hashing algorithm that is much faster than anything in the Go standard library. @@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error) func (*Digest) Sum64() uint64 ``` -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. + +[xxHash]: http://cyan4973.github.io/xxHash/ ## Compatibility @@ -45,19 +47,20 @@ I recommend using the latest release of Go. Here are some quick benchmarks comparing the pure-Go and assembly implementations of Sum64. -| input size | purego | asm | -| --- | --- | --- | -| 5 B | 979.66 MB/s | 1291.17 MB/s | -| 100 B | 7475.26 MB/s | 7973.40 MB/s | -| 4 KB | 17573.46 MB/s | 17602.65 MB/s | -| 10 MB | 17131.46 MB/s | 17142.16 MB/s | +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | -These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using -the following commands under Go 1.11.2: +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: ``` -$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' -$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') ``` ## Projects using this package diff --git a/vendor/github.com/cespare/xxhash/v2/testall.sh b/vendor/github.com/cespare/xxhash/v2/testall.sh new file mode 100644 index 00000000..94b9c443 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/testall.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -eu -o pipefail + +# Small convenience script for running the tests with various combinations of +# arch/tags. This assumes we're running on amd64 and have qemu available. + +go test ./... +go test -tags purego ./... 
+GOARCH=arm64 go test +GOARCH=arm64 go test -tags purego diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go index 15c835d5..a9e0d45c 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -16,19 +16,11 @@ const ( prime5 uint64 = 2870177450012600261 ) -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array of the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. type Digest struct { @@ -50,10 +42,10 @@ func New() *Digest { // Reset clears the Digest's state so that it can be reused. func (d *Digest) Reset() { - d.v1 = prime1v + prime2 + d.v1 = primes[0] + prime2 d.v2 = prime2 d.v3 = 0 - d.v4 = -prime1v + d.v4 = -primes[0] d.total = 0 d.n = 0 } @@ -69,21 +61,23 @@ func (d *Digest) Write(b []byte) (n int, err error) { n = len(b) d.total += uint64(n) + memleft := d.mem[d.n&(len(d.mem)-1):] + if d.n+n < 32 { // This new data doesn't even fill the current block. - copy(d.mem[d.n:], b) + copy(memleft, b) d.n += n return } if d.n > 0 { // Finish off the partial block. - copy(d.mem[d.n:], b) + c := copy(memleft, b) d.v1 = round(d.v1, u64(d.mem[0:8])) d.v2 = round(d.v2, u64(d.mem[8:16])) d.v3 = round(d.v3, u64(d.mem[16:24])) d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[32-d.n:] + b = b[c:] d.n = 0 } @@ -133,21 +127,20 @@ func (d *Digest) Sum64() uint64 { h += d.total - i, end := 0, d.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(d.mem[i:i+8])) + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(d.mem[i:i+4])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for i < end { - h ^= uint64(d.mem[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 - i++ } h ^= h >> 33 diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s index be8db5bf..3e8b1325 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -1,215 +1,209 @@ +//go:build !appengine && gc && !purego // +build !appengine // +build gc // +build !purego #include "textflag.h" -// Register allocation: -// AX h -// SI pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// DI prime4v - -// round reads from and advances the buffer pointer in SI. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (SI), R12 \ - ADDQ $8, SI \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. 
-// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ DI, acc +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop // func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), DI + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 // Load slice. - MOVQ b_base+0(FP), SI - MOVQ b_len+8(FP), DX - LEAQ (SI)(DX*1), BX + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end // The first loop limit will be len(b)-32. - SUBQ $32, BX + SUBQ $32, end // Check whether we have at least one block. - CMPQ DX, $32 + CMPQ n, $32 JLT noBlocks // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until SI > BX. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) + MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) JMP afterBlocks noBlocks: - MOVQ ·prime5v(SB), AX + MOVQ ·primes+32(SB), h afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. - ADDQ $24, BX - - CMPQ SI, BX - JG fourByte - -wordLoop: - // Calculate k1. 
- MOVQ (SI), R8 - ADDQ $8, SI - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ DI, AX - - CMPQ SI, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ SI, BX - JG singles - - MOVL (SI), R8 - ADDQ $4, SI - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ SI, BX + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end JGE finalize -singlesLoop: - MOVBQZX (SI), R12 - ADDQ $1, SI - IMULQ ·prime5v(SB), R12 - XORQ R12, AX +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h - ROLQ $11, AX - IMULQ R13, AX - - CMPQ SI, BX - JL singlesLoop + CMPQ p, end + JL loop1 finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) RET -// writeBlocks uses the same registers as above except that it uses AX to store -// the d pointer. - // func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT, $0-40 +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 // Load slice. - MOVQ b_base+8(FP), SI - MOVQ b_len+16(FP), DX - LEAQ (SI)(DX*1), BX - SUBQ $32, BX + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end // Load vN from d. - MOVQ d+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 // We don't need to check the loop condition here; this function is // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop + blockLoop() // Copy vN back to d. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // The number of bytes written is SI minus the old base pointer. - SUBQ b_base+8(FP), SI - MOVQ SI, ret+32(FP) + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. 
+ SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s new file mode 100644 index 00000000..7e3145a2 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s @@ -0,0 +1,183 @@ +//go:build !appengine && gc && !purego +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. +#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD d+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. 
+ STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go similarity index 73% rename from vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go rename to vendor/github.com/cespare/xxhash/v2/xxhash_asm.go index ad14b807..9216e0a4 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -1,3 +1,5 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego +// +build amd64 arm64 // +build !appengine // +build gc // +build !purego diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go index 4a5a8216..26df13bb 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -1,4 +1,5 @@ -// +build !amd64 appengine !gc purego +//go:build (!amd64 && !arm64) || appengine || !gc || purego +// +build !amd64,!arm64 appengine !gc purego package xxhash @@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 { var h uint64 if n >= 32 { - v1 := prime1v + prime2 + v1 := primes[0] + prime2 v2 := prime2 v3 := uint64(0) - v4 := -prime1v + v4 := -primes[0] for len(b) >= 32 { v1 = round(v1, u64(b[0:8:len(b)])) v2 = round(v2, u64(b[8:16:len(b)])) @@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 { h += uint64(n) - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go index fc9bea7a..e86f1b5f 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -1,3 +1,4 @@ +//go:build appengine // +build appengine // This file contains the safe implementations of otherwise unsafe-using code. diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go index 376e0ca2..1c1638fd 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -1,3 +1,4 @@ +//go:build !appengine // +build !appengine // This file encapsulates usage of unsafe. @@ -11,7 +12,7 @@ import ( // In the future it's possible that compiler optimizations will make these // XxxString functions unnecessary by realizing that calls such as -// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205. +// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205. // If that happens, even if we keep these functions they can be replaced with // the trivial safe code. 
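The xxhash rework above (a contiguous `primes` array shared by Go and assembly, a rewritten amd64 routine, and a new arm64 port) leaves the package's public API unchanged, so vendored consumers in this repository need no code changes. For reference, a minimal sketch of that API in use, based on the signatures quoted in the README diff above plus the package's `New` constructor; the sample inputs are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing of a byte slice. On amd64/arm64 this dispatches to
	// the assembly implementation unless built with -tags purego.
	fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello, world")))

	// Streaming: Digest implements hash.Hash64, so input can arrive in chunks.
	// Both prints emit the same hash because the concatenated input is identical.
	d := xxhash.New()
	d.Write([]byte("hello, "))
	d.WriteString("world")
	fmt.Printf("%016x\n", d.Sum64())
}
```

As `testall.sh` above shows, running the tests with `-tags purego` (and under `GOARCH=arm64` via qemu) exercises the pure-Go fallback on the architectures that would otherwise use the assembly.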
diff --git a/vendor/github.com/go-task/slim-sprig/.editorconfig b/vendor/github.com/go-task/slim-sprig/.editorconfig new file mode 100644 index 00000000..b0c95367 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/.editorconfig @@ -0,0 +1,14 @@ +# editorconfig.org + +root = true + +[*] +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true +indent_style = tab +indent_size = 8 + +[*.{md,yml,yaml,json}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-task/slim-sprig/.gitattributes b/vendor/github.com/go-task/slim-sprig/.gitattributes new file mode 100644 index 00000000..176a458f --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/.gitattributes @@ -0,0 +1 @@ +* text=auto diff --git a/vendor/github.com/go-task/slim-sprig/.gitignore b/vendor/github.com/go-task/slim-sprig/.gitignore new file mode 100644 index 00000000..5e3002f8 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/.gitignore @@ -0,0 +1,2 @@ +vendor/ +/.glide diff --git a/vendor/github.com/go-task/slim-sprig/CHANGELOG.md b/vendor/github.com/go-task/slim-sprig/CHANGELOG.md new file mode 100644 index 00000000..61d8ebff --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/CHANGELOG.md @@ -0,0 +1,364 @@ +# Changelog + +## Release 3.2.0 (2020-12-14) + +### Added + +- #211: Added randInt function (thanks @kochurovro) +- #223: Added fromJson and mustFromJson functions (thanks @mholt) +- #242: Added a bcrypt function (thanks @robbiet480) +- #253: Added randBytes function (thanks @MikaelSmith) +- #254: Added dig function for dicts (thanks @nyarly) +- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton) +- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl) +- #268: Added and and all functions for testing conditions (thanks @phuslu) +- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf + (thanks @andrewmostello) +- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek) +- #270: Extend certificate functions to handle non-RSA keys + add support for + ed25519 keys (thanks @misberner) + +### Changed + +- Removed testing and support for Go 1.12. ed25519 support requires Go 1.13 or newer +- Using semver 3.1.1 and mergo 0.3.11 + +### Fixed + +- #249: Fix htmlDateInZone example (thanks @spawnia) + +NOTE: The dependency github.com/imdario/mergo reverted the breaking change in +0.3.9 via 0.3.10 release. + +## Release 3.1.0 (2020-04-16) + +NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9 +that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8. + +### Added + +- #225: Added support for generating htpasswd hash (thanks @rustycl0ck) +- #224: Added duration filter (thanks @frebib) +- #205: Added `seq` function (thanks @thadc23) + +### Changed + +- #203: Unlambda functions with correct signature (thanks @muesli) +- #236: Updated the license formatting for GitHub display purposes +- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9 + as it causes a breaking change for sprig. 
That issue is tracked at
+  https://github.com/imdario/mergo/issues/139
+
+### Fixed
+
+- #229: Fix `seq` example in docs (thanks @kalmant)
+
+## Release 3.0.2 (2019-12-13)
+
+### Fixed
+
+- #220: Updating to semver v3.0.3 to fix issue with <= ranges
+- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya)
+
+## Release 3.0.1 (2019-12-08)
+
+### Fixed
+
+- #212: Updated semver fixing broken constraint checking with ^0.0
+
+## Release 3.0.0 (2019-10-02)
+
+### Added
+
+- #187: Added durationRound function (thanks @yjp20)
+- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn)
+- #193: Added toRawJson support (thanks @Dean-Coakley)
+- #197: Added get support to dicts (thanks @Dean-Coakley)
+
+### Changed
+
+- #186: Moving dependency management to Go modules
+- #186: Updated semver to v3. This has changes in the way ^ is handled
+- #194: Updated documentation on merging and how it copies. Added example using deepCopy
+- #196: trunc now supports negative values (thanks @Dean-Coakley)
+
+## Release 2.22.0 (2019-10-02)
+
+### Added
+
+- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos)
+- #195: Added deepCopy function for use with dicts
+
+### Changed
+
+- Updated merge and mergeOverwrite documentation to explain copying and how to
+  use deepCopy with it
+
+## Release 2.21.0 (2019-09-18)
+
+### Added
+
+- #122: Added encryptAES/decryptAES functions (thanks @n0madic)
+- #128: Added toDecimal support (thanks @Dean-Coakley)
+- #169: Added list concat (thanks @astorath)
+- #174: Added deepEqual function (thanks @bonifaido)
+- #170: Added url parse and join functions (thanks @astorath)
+
+### Changed
+
+- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify
+
+### Fixed
+
+- #172: Fix semver wildcard example (thanks @piepmatz)
+- #175: Fix dateInZone doc example (thanks @s3than)
+
+## Release 2.20.0 (2019-06-18)
+
+### Added
+
+- #164: Adding function to get unix epoch for a time (@mattfarina)
+- #166: Adding tests for date_in_zone (@mattfarina)
+
+### Changed
+
+- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam)
+- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19)
+- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan)
+
+### Fixed
+
+## Release 2.19.0 (2019-03-02)
+
+IMPORTANT: This release reverts a change from 2.18.0
+
+In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random.
+
+We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience.
+
+### Changed
+
+- Fix substr panic 35fb796 (Alexey igrychev)
+- Remove extra period 1eb7729 (Matthew Lorimor)
+- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor)
+- README edits/fixes/suggestions 08fe136 (Lauri Apple)
+
+
+## Release 2.18.0 (2019-02-12)
+
+### Added
+
+- Added mergeOverwrite function
+- cryptographic functions that use secure random (see fe1de12)
+
+### Changed
+
+- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer)
+- Handle has for nil list 9c10885 (Daniel Cohen)
+- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder)
+- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic)
+- Replace outdated goutils imports 01893d2 (Matthew Lorimor)
+- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor)
+- Handle untyped nil values as parameters to string functions 2b2ec8f (Morten Torkildsen)
+
+### Fixed
+
+- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder)
+- Fix substr var names and comments d581f80 (Dean Coakley)
+- Fix substr documentation 2737203 (Dean Coakley)
+
+## Release 2.17.1 (2019-01-03)
+
+### Fixed
+
+The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml.
+
+## Release 2.17.0 (2019-01-03)
+
+### Added
+
+- adds alder32sum function and test 6908fc2 (marshallford)
+- Added kebabcase function ca331a1 (Ilyes512)
+
+### Changed
+
+- Update goutils to 1.1.0 4e1125d (Matt Butcher)
+
+### Fixed
+
+- Fix 'has' documentation e3f2a85 (dean-coakley)
+- docs(dict): fix typo in pick example dc424f9 (Dustin Specker)
+- fixes spelling errors... not sure how that happened 4cf188a (marshallford)
+
+## Release 2.16.0 (2018-08-13)
+
+### Added
+
+- add splitn function fccb0b0 (Helgi Þorbjörnsson)
+- Add slice func df28ca7 (gongdo)
+- Generate serial number a3bdffd (Cody Coons)
+- Extract values of dict with values function df39312 (Lawrence Jones)
+
+### Changed
+
+- Modify panic message for list.slice ae38335 (gongdo)
+- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap)
+- Remove duplicated documentation 1d97af1 (Matthew Fisher)
+- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson)
+
+### Fixed
+
+- Fix file permissions c5f40b5 (gongdo)
+- Fix example for buildCustomCert 7779e0d (Tin Lam)
+
+## Release 2.15.0 (2018-04-02)
+
+### Added
+
+- #68 and #69: Add json helpers to docs (thanks @arunvelsriram)
+- #66: Add ternary function (thanks @binoculars)
+- #67: Allow keys function to take multiple dicts (thanks @binoculars)
+- #89: Added sha1sum to crypto function (thanks @benkeil)
+- #81: Allow customizing Root CA that is used by genSignedCert (thanks @chenzhiwei)
+- #92: Add travis testing for go 1.10
+- #93: Adding appveyor config for windows testing
+
+### Changed
+
+- #90: Updating to more recent dependencies
+- #73: replace satori/go.uuid with google/uuid (thanks @petterw)
+
+### Fixed
+
+- #76: Fixed documentation typos (thanks @Thiht)
+- Fixed rounding issue on the `ago` function.
Note, this removes support for Go 1.8 and older
+
+## Release 2.14.1 (2017-12-01)
+
+### Fixed
+
+- #60: Fix typo in function name documentation (thanks @neil-ca-moore)
+- #61: Removing line with {{ due to blocking github pages generation
+- #64: Update the list functions to handle int, string, and other slices for compatibility
+
+## Release 2.14.0 (2017-10-06)
+
+This new version of Sprig adds a set of functions for generating and working with SSL certificates.
+
+- `genCA` generates an SSL Certificate Authority
+- `genSelfSignedCert` generates an SSL self-signed certificate
+- `genSignedCert` generates an SSL certificate and key based on a given CA
+
+## Release 2.13.0 (2017-09-18)
+
+This release adds new functions, including:
+
+- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions
+- `floor`, `ceil`, and `round` math functions
+- `toDate` converts a string to a date
+- `nindent` is just like `indent` but also prepends a new line
+- `ago` returns the time from `time.Now`
+
+### Added
+
+- #40: Added basic regex functionality (thanks @alanquillin)
+- #41: Added ceil floor and round functions (thanks @alanquillin)
+- #48: Added toDate function (thanks @andreynering)
+- #50: Added nindent function (thanks @binoculars)
+- #46: Added ago function (thanks @slayer)
+
+### Changed
+
+- #51: Updated godocs to include new string functions (thanks @curtisallen)
+- #49: Added ability to merge multiple dicts (thanks @binoculars)
+
+## Release 2.12.0 (2017-05-17)
+
+- `snakecase`, `camelcase`, and `shuffle` are three new string functions
+- `fail` allows you to bail out of a template render when conditions are not met
+
+## Release 2.11.0 (2017-05-02)
+
+- Added `toJson` and `toPrettyJson`
+- Added `merge`
+- Refactored documentation
+
+## Release 2.10.0 (2017-03-15)
+
+- Added `semver` and `semverCompare` for Semantic Versions
+- `list` replaces `tuple`
+- Fixed issue with `join`
+- Added `first`, `last`, `initial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without`
+
+## Release 2.9.0 (2017-02-23)
+
+- Added `splitList` to split a list
+- Added crypto functions of `genPrivateKey` and `derivePassword`
+
+## Release 2.8.0 (2016-12-21)
+
+- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`)
+- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`)
+
+## Release 2.7.0 (2016-12-01)
+
+- Added `sha256sum` to generate a hash of an input
+- Added functions to convert a numeric or string to `int`, `int64`, `float64`
+
+## Release 2.6.0 (2016-10-03)
+
+- Added a `uuidv4` template function for generating UUIDs inside of a template.
+
+## Release 2.5.0 (2016-08-19)
+
+- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions
+- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`)
+- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0
+
+## Release 2.4.0 (2016-08-16)
+
+- Adds two functions: `until` and `untilStep`
+
+## Release 2.3.0 (2016-06-21)
+
+- cat: Concatenate strings with whitespace separators.
+- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First"
+- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos"
+- indent: Indent blocks of text in a way that is sensitive to "\n" characters.
+
+## Release 2.2.0 (2016-04-21)
+
+- Added a `genPrivateKey` function (Thanks @bacongobbler)
+
+## Release 2.1.0 (2016-03-30)
+
+- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`.
+- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output.
+
+## Release 2.0.0 (2016-03-29)
+
+Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented.
+
+- `min` complements `max` (formerly `biggest`)
+- `empty` indicates that a value is the empty value for its type
+- `tuple` creates a tuple inside of a template: `{{$t := tuple "a" "b" "c"}}`
+- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}`
+- Date formatters have been added for HTML dates (as used in `date` input fields)
+- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`).
+
+## Release 1.2.0 (2016-02-01)
+
+- Added quote and squote
+- Added b32enc and b32dec
+- add now takes varargs
+- biggest now takes varargs
+
+## Release 1.1.0 (2015-12-29)
+
+- Added #4: Added contains function. strings.Contains, but with the arguments
+  switched to simplify common pipelines. (thanks krancour)
+- Added Travis-CI testing support
+
+## Release 1.0.0 (2015-12-23)
+
+- Initial release
diff --git a/vendor/github.com/go-task/slim-sprig/LICENSE.txt b/vendor/github.com/go-task/slim-sprig/LICENSE.txt
new file mode 100644
index 00000000..f311b1ea
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (C) 2013-2020 Masterminds
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/go-task/slim-sprig/README.md b/vendor/github.com/go-task/slim-sprig/README.md
new file mode 100644
index 00000000..72579471
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/README.md
@@ -0,0 +1,73 @@
+# Slim-Sprig: Template functions for Go templates [![GoDoc](https://godoc.org/github.com/go-task/slim-sprig?status.svg)](https://godoc.org/github.com/go-task/slim-sprig) [![Go Report Card](https://goreportcard.com/badge/github.com/go-task/slim-sprig)](https://goreportcard.com/report/github.com/go-task/slim-sprig)
+
+Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig), but with
+all functions that depend on external (non standard library) or crypto packages
+removed.
+
+The reason for this is to make this library more lightweight. Most of these
+functions (especially crypto ones) are not needed in most apps, but cost a lot
+in terms of binary size and compilation time.
+
+## Usage
+
+**Template developers**: Please use Slim-Sprig's [function documentation](https://go-task.github.io/slim-sprig/) for
+detailed instructions and code snippets for the >100 template functions available.
+
+**Go developers**: If you'd like to include Slim-Sprig as a library in your program,
+our API documentation is available [at GoDoc.org](http://godoc.org/github.com/go-task/slim-sprig).
+
+For standard usage, read on.
+
+### Load the Slim-Sprig library
+
+To load the Slim-Sprig `FuncMap`:
+
+```go
+
+import (
+  "html/template"
+
+  "github.com/go-task/slim-sprig"
+)
+
+// This example illustrates that the FuncMap *must* be set before the
+// templates themselves are loaded.
+tpl := template.Must(
+  template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html")
+)
+```
+
+### Calling the functions inside of templates
+
+By convention, all functions are lowercase. This seems to follow the Go
+idiom for template functions (as opposed to template methods, which are
+TitleCase). For example, this:
+
+```
+{{ "hello!" | upper | repeat 5 }}
+```
+
+produces this:
+
+```
+HELLO!HELLO!HELLO!HELLO!HELLO!
+```
+
+## Principles Driving Our Function Selection
+
+We followed these principles to decide which functions to add and how to implement them:
+
+- Use template functions to build layout. The following
+  types of operations are within the domain of template functions:
+  - Formatting
+  - Layout
+  - Simple type conversions
+  - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic)
+- Template functions should not return errors unless there is no way to print
+  a sensible value. For example, converting a string to an integer should not
+  produce an error if conversion fails. Instead, it should display a default
+  value.
+- Simple math is necessary for grid layouts, pagers, and so on. Complex math
+  (anything other than arithmetic) should be done outside of templates.
+- Template functions only deal with the data passed into them. They never retrieve
+  data from a source.
+- Finally, do not override core Go template functions.
diff --git a/vendor/github.com/go-task/slim-sprig/Taskfile.yml b/vendor/github.com/go-task/slim-sprig/Taskfile.yml
new file mode 100644
index 00000000..cdcfd223
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/Taskfile.yml
@@ -0,0 +1,12 @@
+# https://taskfile.dev
+
+version: '2'
+
+tasks:
+  default:
+    cmds:
+      - task: test
+
+  test:
+    cmds:
+      - go test -v .
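To make the README's usage section above concrete, here is a minimal, self-contained sketch (not part of the vendored files) of registering the Slim-Sprig function map with Go's `text/template` via `TxtFuncMap`; the template text and data values are illustrative only:

```go
package main

import (
	"os"
	"text/template"

	sprig "github.com/go-task/slim-sprig"
)

func main() {
	// Funcs must be called before Parse so that names like "upper" and
	// "repeat" can be resolved when the template text is parsed.
	tpl := template.Must(
		template.New("demo").
			Funcs(sprig.TxtFuncMap()).
			Parse(`{{ .Name | upper | repeat 2 }} has {{ len .Items }} item(s)`))

	data := map[string]interface{}{
		"Name":  "go",
		"Items": []string{"a", "b"},
	}
	if err := tpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```

Executing this prints `GOGO has 2 item(s)`: a piped value becomes the last argument of the next function, which is why `repeat` takes its count first, as noted in the function map's own comments.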
diff --git a/vendor/github.com/go-task/slim-sprig/crypto.go b/vendor/github.com/go-task/slim-sprig/crypto.go new file mode 100644 index 00000000..d06e516d --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/crypto.go @@ -0,0 +1,24 @@ +package sprig + +import ( + "crypto/sha1" + "crypto/sha256" + "encoding/hex" + "fmt" + "hash/adler32" +) + +func sha256sum(input string) string { + hash := sha256.Sum256([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func sha1sum(input string) string { + hash := sha1.Sum([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func adler32sum(input string) string { + hash := adler32.Checksum([]byte(input)) + return fmt.Sprintf("%d", hash) +} diff --git a/vendor/github.com/go-task/slim-sprig/date.go b/vendor/github.com/go-task/slim-sprig/date.go new file mode 100644 index 00000000..ed022dda --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/date.go @@ -0,0 +1,152 @@ +package sprig + +import ( + "strconv" + "time" +) + +// Given a format and a date, format the date string. +// +// Date can be a `time.Time` or an `int, int32, int64`. +// In the later case, it is treated as seconds since UNIX +// epoch. +func date(fmt string, date interface{}) string { + return dateInZone(fmt, date, "Local") +} + +func htmlDate(date interface{}) string { + return dateInZone("2006-01-02", date, "Local") +} + +func htmlDateInZone(date interface{}, zone string) string { + return dateInZone("2006-01-02", date, zone) +} + +func dateInZone(fmt string, date interface{}, zone string) string { + var t time.Time + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case *time.Time: + t = *date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + case int32: + t = time.Unix(int64(date), 0) + } + + loc, err := time.LoadLocation(zone) + if err != nil { + loc, _ = time.LoadLocation("UTC") + } + + return t.In(loc).Format(fmt) +} + +func dateModify(fmt string, date time.Time) time.Time { + d, err := time.ParseDuration(fmt) + if err != nil { + return date + } + return date.Add(d) +} + +func mustDateModify(fmt string, date time.Time) (time.Time, error) { + d, err := time.ParseDuration(fmt) + if err != nil { + return time.Time{}, err + } + return date.Add(d), nil +} + +func dateAgo(date interface{}) string { + var t time.Time + + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + } + // Drop resolution to seconds + duration := time.Since(t).Round(time.Second) + return duration.String() +} + +func duration(sec interface{}) string { + var n int64 + switch value := sec.(type) { + default: + n = 0 + case string: + n, _ = strconv.ParseInt(value, 10, 64) + case int64: + n = value + } + return (time.Duration(n) * time.Second).String() +} + +func durationRound(duration interface{}) string { + var d time.Duration + switch duration := duration.(type) { + default: + d = 0 + case string: + d, _ = time.ParseDuration(duration) + case int64: + d = time.Duration(duration) + case time.Time: + d = time.Since(duration) + } + + u := uint64(d) + neg := d < 0 + if neg { + u = -u + } + + var ( + year = uint64(time.Hour) * 24 * 365 + month = uint64(time.Hour) * 24 * 30 + day = uint64(time.Hour) * 24 + hour = uint64(time.Hour) + minute = uint64(time.Minute) + second = uint64(time.Second) + ) + switch { + case u > year: + return strconv.FormatUint(u/year, 10) + "y" + case u > month: + return 
strconv.FormatUint(u/month, 10) + "mo" + case u > day: + return strconv.FormatUint(u/day, 10) + "d" + case u > hour: + return strconv.FormatUint(u/hour, 10) + "h" + case u > minute: + return strconv.FormatUint(u/minute, 10) + "m" + case u > second: + return strconv.FormatUint(u/second, 10) + "s" + } + return "0s" +} + +func toDate(fmt, str string) time.Time { + t, _ := time.ParseInLocation(fmt, str, time.Local) + return t +} + +func mustToDate(fmt, str string) (time.Time, error) { + return time.ParseInLocation(fmt, str, time.Local) +} + +func unixEpoch(date time.Time) string { + return strconv.FormatInt(date.Unix(), 10) +} diff --git a/vendor/github.com/go-task/slim-sprig/defaults.go b/vendor/github.com/go-task/slim-sprig/defaults.go new file mode 100644 index 00000000..b9f97966 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/defaults.go @@ -0,0 +1,163 @@ +package sprig + +import ( + "bytes" + "encoding/json" + "math/rand" + "reflect" + "strings" + "time" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +// dfault checks whether `given` is set, and returns default if not set. +// +// This returns `d` if `given` appears not to be set, and `given` otherwise. +// +// For numeric types 0 is unset. +// For strings, maps, arrays, and slices, len() = 0 is considered unset. +// For bool, false is unset. +// Structs are never considered unset. +// +// For everything else, including pointers, a nil value is unset. +func dfault(d interface{}, given ...interface{}) interface{} { + + if empty(given) || empty(given[0]) { + return d + } + return given[0] +} + +// empty returns true if the given value has the zero value for its type. +func empty(given interface{}) bool { + g := reflect.ValueOf(given) + if !g.IsValid() { + return true + } + + // Basically adapted from text/template.isTrue + switch g.Kind() { + default: + return g.IsNil() + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return g.Len() == 0 + case reflect.Bool: + return !g.Bool() + case reflect.Complex64, reflect.Complex128: + return g.Complex() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return g.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return g.Uint() == 0 + case reflect.Float32, reflect.Float64: + return g.Float() == 0 + case reflect.Struct: + return false + } +} + +// coalesce returns the first non-empty value. +func coalesce(v ...interface{}) interface{} { + for _, val := range v { + if !empty(val) { + return val + } + } + return nil +} + +// all returns true if empty(x) is false for all values x in the list. +// If the list is empty, return true. +func all(v ...interface{}) bool { + for _, val := range v { + if empty(val) { + return false + } + } + return true +} + +// any returns true if empty(x) is false for any x in the list. +// If the list is empty, return false. +func any(v ...interface{}) bool { + for _, val := range v { + if !empty(val) { + return true + } + } + return false +} + +// fromJson decodes JSON into a structured value, ignoring errors. +func fromJson(v string) interface{} { + output, _ := mustFromJson(v) + return output +} + +// mustFromJson decodes JSON into a structured value, returning errors. 
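+// For example, mustFromJson(`{"a": 1}`) yields map[string]interface{}{"a": float64(1)},
+// since encoding/json decodes JSON numbers as float64 when the target is interface{}.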
+func mustFromJson(v string) (interface{}, error) { + var output interface{} + err := json.Unmarshal([]byte(v), &output) + return output, err +} + +// toJson encodes an item into a JSON string +func toJson(v interface{}) string { + output, _ := json.Marshal(v) + return string(output) +} + +func mustToJson(v interface{}) (string, error) { + output, err := json.Marshal(v) + if err != nil { + return "", err + } + return string(output), nil +} + +// toPrettyJson encodes an item into a pretty (indented) JSON string +func toPrettyJson(v interface{}) string { + output, _ := json.MarshalIndent(v, "", " ") + return string(output) +} + +func mustToPrettyJson(v interface{}) (string, error) { + output, err := json.MarshalIndent(v, "", " ") + if err != nil { + return "", err + } + return string(output), nil +} + +// toRawJson encodes an item into a JSON string with no escaping of HTML characters. +func toRawJson(v interface{}) string { + output, err := mustToRawJson(v) + if err != nil { + panic(err) + } + return string(output) +} + +// mustToRawJson encodes an item into a JSON string with no escaping of HTML characters. +func mustToRawJson(v interface{}) (string, error) { + buf := new(bytes.Buffer) + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err := enc.Encode(&v) + if err != nil { + return "", err + } + return strings.TrimSuffix(buf.String(), "\n"), nil +} + +// ternary returns the first value if the last value is true, otherwise returns the second value. +func ternary(vt interface{}, vf interface{}, v bool) interface{} { + if v { + return vt + } + + return vf +} diff --git a/vendor/github.com/go-task/slim-sprig/dict.go b/vendor/github.com/go-task/slim-sprig/dict.go new file mode 100644 index 00000000..77ebc61b --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/dict.go @@ -0,0 +1,118 @@ +package sprig + +func get(d map[string]interface{}, key string) interface{} { + if val, ok := d[key]; ok { + return val + } + return "" +} + +func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} { + d[key] = value + return d +} + +func unset(d map[string]interface{}, key string) map[string]interface{} { + delete(d, key) + return d +} + +func hasKey(d map[string]interface{}, key string) bool { + _, ok := d[key] + return ok +} + +func pluck(key string, d ...map[string]interface{}) []interface{} { + res := []interface{}{} + for _, dict := range d { + if val, ok := dict[key]; ok { + res = append(res, val) + } + } + return res +} + +func keys(dicts ...map[string]interface{}) []string { + k := []string{} + for _, dict := range dicts { + for key := range dict { + k = append(k, key) + } + } + return k +} + +func pick(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + for _, k := range keys { + if v, ok := dict[k]; ok { + res[k] = v + } + } + return res +} + +func omit(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + + omit := make(map[string]bool, len(keys)) + for _, k := range keys { + omit[k] = true + } + + for k, v := range dict { + if _, ok := omit[k]; !ok { + res[k] = v + } + } + return res +} + +func dict(v ...interface{}) map[string]interface{} { + dict := map[string]interface{}{} + lenv := len(v) + for i := 0; i < lenv; i += 2 { + key := strval(v[i]) + if i+1 >= lenv { + dict[key] = "" + continue + } + dict[key] = v[i+1] + } + return dict +} + +func values(dict map[string]interface{}) []interface{} { + values := []interface{}{} + for _, value := range 
dict { + values = append(values, value) + } + + return values +} + +func dig(ps ...interface{}) (interface{}, error) { + if len(ps) < 3 { + panic("dig needs at least three arguments") + } + dict := ps[len(ps)-1].(map[string]interface{}) + def := ps[len(ps)-2] + ks := make([]string, len(ps)-2) + for i := 0; i < len(ks); i++ { + ks[i] = ps[i].(string) + } + + return digFromDict(dict, def, ks) +} + +func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) { + k, ns := ks[0], ks[1:len(ks)] + step, has := dict[k] + if !has { + return d, nil + } + if len(ns) == 0 { + return step, nil + } + return digFromDict(step.(map[string]interface{}), d, ns) +} diff --git a/vendor/github.com/go-task/slim-sprig/doc.go b/vendor/github.com/go-task/slim-sprig/doc.go new file mode 100644 index 00000000..aabb9d44 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/doc.go @@ -0,0 +1,19 @@ +/* +Package sprig provides template functions for Go. + +This package contains a number of utility functions for working with data +inside of Go `html/template` and `text/template` files. + +To add these functions, use the `template.Funcs()` method: + + t := templates.New("foo").Funcs(sprig.FuncMap()) + +Note that you should add the function map before you parse any template files. + + In several cases, Sprig reverses the order of arguments from the way they + appear in the standard library. This is to make it easier to pipe + arguments into functions. + +See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions. +*/ +package sprig diff --git a/vendor/github.com/go-task/slim-sprig/functions.go b/vendor/github.com/go-task/slim-sprig/functions.go new file mode 100644 index 00000000..5ea74f89 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/functions.go @@ -0,0 +1,317 @@ +package sprig + +import ( + "errors" + "html/template" + "math/rand" + "os" + "path" + "path/filepath" + "reflect" + "strconv" + "strings" + ttemplate "text/template" + "time" +) + +// FuncMap produces the function map. +// +// Use this to pass the functions into the template engine: +// +// tpl := template.New("foo").Funcs(sprig.FuncMap())) +// +func FuncMap() template.FuncMap { + return HtmlFuncMap() +} + +// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions. +func HermeticTxtFuncMap() ttemplate.FuncMap { + r := TxtFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// HermeticHtmlFuncMap returns an 'html/template'.Funcmap with only repeatable functions. +func HermeticHtmlFuncMap() template.FuncMap { + r := HtmlFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// TxtFuncMap returns a 'text/template'.FuncMap +func TxtFuncMap() ttemplate.FuncMap { + return ttemplate.FuncMap(GenericFuncMap()) +} + +// HtmlFuncMap returns an 'html/template'.Funcmap +func HtmlFuncMap() template.FuncMap { + return template.FuncMap(GenericFuncMap()) +} + +// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}. +func GenericFuncMap() map[string]interface{} { + gfm := make(map[string]interface{}, len(genericMap)) + for k, v := range genericMap { + gfm[k] = v + } + return gfm +} + +// These functions are not guaranteed to evaluate to the same result for given input, because they +// refer to the environment or global state. 
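+// For example, "now" reads the wall clock and "env" reads the process
+// environment, which is why the Hermetic*FuncMap variants delete these names.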
+var nonhermeticFunctions = []string{ + // Date functions + "date", + "date_in_zone", + "date_modify", + "now", + "htmlDate", + "htmlDateInZone", + "dateInZone", + "dateModify", + + // Strings + "randAlphaNum", + "randAlpha", + "randAscii", + "randNumeric", + "randBytes", + "uuidv4", + + // OS + "env", + "expandenv", + + // Network + "getHostByName", +} + +var genericMap = map[string]interface{}{ + "hello": func() string { return "Hello!" }, + + // Date functions + "ago": dateAgo, + "date": date, + "date_in_zone": dateInZone, + "date_modify": dateModify, + "dateInZone": dateInZone, + "dateModify": dateModify, + "duration": duration, + "durationRound": durationRound, + "htmlDate": htmlDate, + "htmlDateInZone": htmlDateInZone, + "must_date_modify": mustDateModify, + "mustDateModify": mustDateModify, + "mustToDate": mustToDate, + "now": time.Now, + "toDate": toDate, + "unixEpoch": unixEpoch, + + // Strings + "trunc": trunc, + "trim": strings.TrimSpace, + "upper": strings.ToUpper, + "lower": strings.ToLower, + "title": strings.Title, + "substr": substring, + // Switch order so that "foo" | repeat 5 + "repeat": func(count int, str string) string { return strings.Repeat(str, count) }, + // Deprecated: Use trimAll. + "trimall": func(a, b string) string { return strings.Trim(b, a) }, + // Switch order so that "$foo" | trimall "$" + "trimAll": func(a, b string) string { return strings.Trim(b, a) }, + "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) }, + "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) }, + // Switch order so that "foobar" | contains "foo" + "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, + "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, + "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, + "quote": quote, + "squote": squote, + "cat": cat, + "indent": indent, + "nindent": nindent, + "replace": replace, + "plural": plural, + "sha1sum": sha1sum, + "sha256sum": sha256sum, + "adler32sum": adler32sum, + "toString": strval, + + // Wrap Atoi to stop errors. + "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, + "int64": toInt64, + "int": toInt, + "float64": toFloat64, + "seq": seq, + "toDecimal": toDecimal, + + //"gt": func(a, b int) bool {return a > b}, + //"gte": func(a, b int) bool {return a >= b}, + //"lt": func(a, b int) bool {return a < b}, + //"lte": func(a, b int) bool {return a <= b}, + + // split "/" foo/bar returns map[int]string{0: foo, 1: bar} + "split": split, + "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) }, + // splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu} + "splitn": splitn, + "toStrings": strslice, + + "until": until, + "untilStep": untilStep, + + // VERY basic arithmetic. 
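+	// All of these coerce their operands through toInt64, so for example
+	// {{ add "1" 2 }} renders 3 and {{ div 7 2 }} renders 3 (integer division).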
+ "add1": func(i interface{}) int64 { return toInt64(i) + 1 }, + "add": func(i ...interface{}) int64 { + var a int64 = 0 + for _, b := range i { + a += toInt64(b) + } + return a + }, + "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) }, + "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) }, + "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) }, + "mul": func(a interface{}, v ...interface{}) int64 { + val := toInt64(a) + for _, b := range v { + val = val * toInt64(b) + } + return val + }, + "randInt": func(min, max int) int { return rand.Intn(max-min) + min }, + "biggest": max, + "max": max, + "min": min, + "maxf": maxf, + "minf": minf, + "ceil": ceil, + "floor": floor, + "round": round, + + // string slices. Note that we reverse the order b/c that's better + // for template processing. + "join": join, + "sortAlpha": sortAlpha, + + // Defaults + "default": dfault, + "empty": empty, + "coalesce": coalesce, + "all": all, + "any": any, + "compact": compact, + "mustCompact": mustCompact, + "fromJson": fromJson, + "toJson": toJson, + "toPrettyJson": toPrettyJson, + "toRawJson": toRawJson, + "mustFromJson": mustFromJson, + "mustToJson": mustToJson, + "mustToPrettyJson": mustToPrettyJson, + "mustToRawJson": mustToRawJson, + "ternary": ternary, + + // Reflection + "typeOf": typeOf, + "typeIs": typeIs, + "typeIsLike": typeIsLike, + "kindOf": kindOf, + "kindIs": kindIs, + "deepEqual": reflect.DeepEqual, + + // OS: + "env": os.Getenv, + "expandenv": os.ExpandEnv, + + // Network: + "getHostByName": getHostByName, + + // Paths: + "base": path.Base, + "dir": path.Dir, + "clean": path.Clean, + "ext": path.Ext, + "isAbs": path.IsAbs, + + // Filepaths: + "osBase": filepath.Base, + "osClean": filepath.Clean, + "osDir": filepath.Dir, + "osExt": filepath.Ext, + "osIsAbs": filepath.IsAbs, + + // Encoding: + "b64enc": base64encode, + "b64dec": base64decode, + "b32enc": base32encode, + "b32dec": base32decode, + + // Data Structures: + "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. 
+ "list": list, + "dict": dict, + "get": get, + "set": set, + "unset": unset, + "hasKey": hasKey, + "pluck": pluck, + "keys": keys, + "pick": pick, + "omit": omit, + "values": values, + + "append": push, "push": push, + "mustAppend": mustPush, "mustPush": mustPush, + "prepend": prepend, + "mustPrepend": mustPrepend, + "first": first, + "mustFirst": mustFirst, + "rest": rest, + "mustRest": mustRest, + "last": last, + "mustLast": mustLast, + "initial": initial, + "mustInitial": mustInitial, + "reverse": reverse, + "mustReverse": mustReverse, + "uniq": uniq, + "mustUniq": mustUniq, + "without": without, + "mustWithout": mustWithout, + "has": has, + "mustHas": mustHas, + "slice": slice, + "mustSlice": mustSlice, + "concat": concat, + "dig": dig, + "chunk": chunk, + "mustChunk": mustChunk, + + // Flow Control: + "fail": func(msg string) (string, error) { return "", errors.New(msg) }, + + // Regex + "regexMatch": regexMatch, + "mustRegexMatch": mustRegexMatch, + "regexFindAll": regexFindAll, + "mustRegexFindAll": mustRegexFindAll, + "regexFind": regexFind, + "mustRegexFind": mustRegexFind, + "regexReplaceAll": regexReplaceAll, + "mustRegexReplaceAll": mustRegexReplaceAll, + "regexReplaceAllLiteral": regexReplaceAllLiteral, + "mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral, + "regexSplit": regexSplit, + "mustRegexSplit": mustRegexSplit, + "regexQuoteMeta": regexQuoteMeta, + + // URLs: + "urlParse": urlParse, + "urlJoin": urlJoin, +} diff --git a/vendor/github.com/go-task/slim-sprig/list.go b/vendor/github.com/go-task/slim-sprig/list.go new file mode 100644 index 00000000..ca0fbb78 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/list.go @@ -0,0 +1,464 @@ +package sprig + +import ( + "fmt" + "math" + "reflect" + "sort" +) + +// Reflection is used in these functions so that slices and arrays of strings, +// ints, and other types not implementing []interface{} can be worked with. +// For example, this is useful if you need to work on the output of regexs. + +func list(v ...interface{}) []interface{} { + return v +} + +func push(list interface{}, v interface{}) []interface{} { + l, err := mustPush(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPush(list interface{}, v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append(nl, v), nil + + default: + return nil, fmt.Errorf("Cannot push on type %s", tp) + } +} + +func prepend(list interface{}, v interface{}) []interface{} { + l, err := mustPrepend(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPrepend(list interface{}, v interface{}) ([]interface{}, error) { + //return append([]interface{}{v}, list...) 
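+	// The commented-out line above would only handle []interface{}; reflection
+	// is used below so typed slices and arrays (e.g. []string) also work.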
+ + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append([]interface{}{v}, nl...), nil + + default: + return nil, fmt.Errorf("Cannot prepend on type %s", tp) + } +} + +func chunk(size int, list interface{}) [][]interface{} { + l, err := mustChunk(size, list) + if err != nil { + panic(err) + } + + return l +} + +func mustChunk(size int, list interface{}) ([][]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + + cs := int(math.Floor(float64(l-1)/float64(size)) + 1) + nl := make([][]interface{}, cs) + + for i := 0; i < cs; i++ { + clen := size + if i == cs-1 { + clen = int(math.Floor(math.Mod(float64(l), float64(size)))) + if clen == 0 { + clen = size + } + } + + nl[i] = make([]interface{}, clen) + + for j := 0; j < clen; j++ { + ix := i*size + j + nl[i][j] = l2.Index(ix).Interface() + } + } + + return nl, nil + + default: + return nil, fmt.Errorf("Cannot chunk type %s", tp) + } +} + +func last(list interface{}) interface{} { + l, err := mustLast(list) + if err != nil { + panic(err) + } + + return l +} + +func mustLast(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(l - 1).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find last on type %s", tp) + } +} + +func first(list interface{}) interface{} { + l, err := mustFirst(list) + if err != nil { + panic(err) + } + + return l +} + +func mustFirst(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(0).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find first on type %s", tp) + } +} + +func rest(list interface{}) []interface{} { + l, err := mustRest(list) + if err != nil { + panic(err) + } + + return l +} + +func mustRest(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 1; i < l; i++ { + nl[i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find rest on type %s", tp) + } +} + +func initial(list interface{}) []interface{} { + l, err := mustInitial(list) + if err != nil { + panic(err) + } + + return l +} + +func mustInitial(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 0; i < l-1; i++ { + nl[i] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find initial on type %s", tp) + } +} + +func sortAlpha(list interface{}) []string { + k := reflect.Indirect(reflect.ValueOf(list)).Kind() + switch k { + case reflect.Slice, reflect.Array: + a := strslice(list) + s := sort.StringSlice(a) + s.Sort() + return s + } + return []string{strval(list)} +} + +func reverse(v 
interface{}) []interface{} { + l, err := mustReverse(v) + if err != nil { + panic(err) + } + + return l +} + +func mustReverse(v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(v).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(v) + + l := l2.Len() + // We do not sort in place because the incoming array should not be altered. + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[l-i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find reverse on type %s", tp) + } +} + +func compact(list interface{}) []interface{} { + l, err := mustCompact(list) + if err != nil { + panic(err) + } + + return l +} + +func mustCompact(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !empty(item) { + nl = append(nl, item) + } + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot compact on type %s", tp) + } +} + +func uniq(list interface{}) []interface{} { + l, err := mustUniq(list) + if err != nil { + panic(err) + } + + return l +} + +func mustUniq(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + dest := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(dest, item) { + dest = append(dest, item) + } + } + + return dest, nil + default: + return nil, fmt.Errorf("Cannot find uniq on type %s", tp) + } +} + +func inList(haystack []interface{}, needle interface{}) bool { + for _, h := range haystack { + if reflect.DeepEqual(needle, h) { + return true + } + } + return false +} + +func without(list interface{}, omit ...interface{}) []interface{} { + l, err := mustWithout(list, omit...) + if err != nil { + panic(err) + } + + return l +} + +func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + res := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(omit, item) { + res = append(res, item) + } + } + + return res, nil + default: + return nil, fmt.Errorf("Cannot find without on type %s", tp) + } +} + +func has(needle interface{}, haystack interface{}) bool { + l, err := mustHas(needle, haystack) + if err != nil { + panic(err) + } + + return l +} + +func mustHas(needle interface{}, haystack interface{}) (bool, error) { + if haystack == nil { + return false, nil + } + tp := reflect.TypeOf(haystack).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(haystack) + var item interface{} + l := l2.Len() + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if reflect.DeepEqual(needle, item) { + return true, nil + } + } + + return false, nil + default: + return false, fmt.Errorf("Cannot find has on type %s", tp) + } +} + +// $list := [1, 2, 3, 4, 5] +// slice $list -> list[0:5] = list[:] +// slice $list 0 3 -> list[0:3] = list[:3] +// slice $list 3 5 -> list[3:5] +// slice $list 3 -> list[3:5] = list[3:] +func slice(list interface{}, indices ...interface{}) interface{} { + l, err := mustSlice(list, indices...) 
+ if err != nil { + panic(err) + } + + return l +} + +func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + var start, end int + if len(indices) > 0 { + start = toInt(indices[0]) + } + if len(indices) < 2 { + end = l + } else { + end = toInt(indices[1]) + } + + return l2.Slice(start, end).Interface(), nil + default: + return nil, fmt.Errorf("list should be type of slice or array but %s", tp) + } +} + +func concat(lists ...interface{}) interface{} { + var res []interface{} + for _, list := range lists { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + for i := 0; i < l2.Len(); i++ { + res = append(res, l2.Index(i).Interface()) + } + default: + panic(fmt.Sprintf("Cannot concat type %s as list", tp)) + } + } + return res +} diff --git a/vendor/github.com/go-task/slim-sprig/network.go b/vendor/github.com/go-task/slim-sprig/network.go new file mode 100644 index 00000000..108d78a9 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/network.go @@ -0,0 +1,12 @@ +package sprig + +import ( + "math/rand" + "net" +) + +func getHostByName(name string) string { + addrs, _ := net.LookupHost(name) + //TODO: add error handing when release v3 comes out + return addrs[rand.Intn(len(addrs))] +} diff --git a/vendor/github.com/go-task/slim-sprig/numeric.go b/vendor/github.com/go-task/slim-sprig/numeric.go new file mode 100644 index 00000000..98cbb37a --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/numeric.go @@ -0,0 +1,228 @@ +package sprig + +import ( + "fmt" + "math" + "reflect" + "strconv" + "strings" +) + +// toFloat64 converts 64-bit floats +func toFloat64(v interface{}) float64 { + if str, ok := v.(string); ok { + iv, err := strconv.ParseFloat(str, 64) + if err != nil { + return 0 + } + return iv + } + + val := reflect.Indirect(reflect.ValueOf(v)) + switch val.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return float64(val.Int()) + case reflect.Uint8, reflect.Uint16, reflect.Uint32: + return float64(val.Uint()) + case reflect.Uint, reflect.Uint64: + return float64(val.Uint()) + case reflect.Float32, reflect.Float64: + return val.Float() + case reflect.Bool: + if val.Bool() { + return 1 + } + return 0 + default: + return 0 + } +} + +func toInt(v interface{}) int { + //It's not optimal. Bud I don't want duplicate toInt64 code. + return int(toInt64(v)) +} + +// toInt64 converts integer types to 64-bit integers +func toInt64(v interface{}) int64 { + if str, ok := v.(string); ok { + iv, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return 0 + } + return iv + } + + val := reflect.Indirect(reflect.ValueOf(v)) + switch val.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return val.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32: + return int64(val.Uint()) + case reflect.Uint, reflect.Uint64: + tv := val.Uint() + if tv <= math.MaxInt64 { + return int64(tv) + } + // TODO: What is the sensible thing to do here? 
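+		// Until a better answer exists, the conversion saturates: values
+		// above math.MaxInt64 clamp to it.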
+ return math.MaxInt64 + case reflect.Float32, reflect.Float64: + return int64(val.Float()) + case reflect.Bool: + if val.Bool() { + return 1 + } + return 0 + default: + return 0 + } +} + +func max(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb > aa { + aa = bb + } + } + return aa +} + +func maxf(a interface{}, i ...interface{}) float64 { + aa := toFloat64(a) + for _, b := range i { + bb := toFloat64(b) + aa = math.Max(aa, bb) + } + return aa +} + +func min(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb < aa { + aa = bb + } + } + return aa +} + +func minf(a interface{}, i ...interface{}) float64 { + aa := toFloat64(a) + for _, b := range i { + bb := toFloat64(b) + aa = math.Min(aa, bb) + } + return aa +} + +func until(count int) []int { + step := 1 + if count < 0 { + step = -1 + } + return untilStep(0, count, step) +} + +func untilStep(start, stop, step int) []int { + v := []int{} + + if stop < start { + if step >= 0 { + return v + } + for i := start; i > stop; i += step { + v = append(v, i) + } + return v + } + + if step <= 0 { + return v + } + for i := start; i < stop; i += step { + v = append(v, i) + } + return v +} + +func floor(a interface{}) float64 { + aa := toFloat64(a) + return math.Floor(aa) +} + +func ceil(a interface{}) float64 { + aa := toFloat64(a) + return math.Ceil(aa) +} + +func round(a interface{}, p int, rOpt ...float64) float64 { + roundOn := .5 + if len(rOpt) > 0 { + roundOn = rOpt[0] + } + val := toFloat64(a) + places := toFloat64(p) + + var round float64 + pow := math.Pow(10, places) + digit := pow * val + _, div := math.Modf(digit) + if div >= roundOn { + round = math.Ceil(digit) + } else { + round = math.Floor(digit) + } + return round / pow +} + +// converts unix octal to decimal +func toDecimal(v interface{}) int64 { + result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64) + if err != nil { + return 0 + } + return result +} + +func seq(params ...int) string { + increment := 1 + switch len(params) { + case 0: + return "" + case 1: + start := 1 + end := params[0] + if end < start { + increment = -1 + } + return intArrayToString(untilStep(start, end+increment, increment), " ") + case 3: + start := params[0] + end := params[2] + step := params[1] + if end < start { + increment = -1 + if step > 0 { + return "" + } + } + return intArrayToString(untilStep(start, end+increment, step), " ") + case 2: + start := params[0] + end := params[1] + step := 1 + if end < start { + step = -1 + } + return intArrayToString(untilStep(start, end+step, step), " ") + default: + return "" + } +} + +func intArrayToString(slice []int, delimeter string) string { + return strings.Trim(strings.Join(strings.Fields(fmt.Sprint(slice)), delimeter), "[]") +} diff --git a/vendor/github.com/go-task/slim-sprig/reflect.go b/vendor/github.com/go-task/slim-sprig/reflect.go new file mode 100644 index 00000000..8a65c132 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/reflect.go @@ -0,0 +1,28 @@ +package sprig + +import ( + "fmt" + "reflect" +) + +// typeIs returns true if the src is the type named in target. 
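+// For example, typeIs("int", 42) is true, while a *int reports "*int";
+// typeIsLike additionally accepts the pointer form "*"+target.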
+func typeIs(target string, src interface{}) bool { + return target == typeOf(src) +} + +func typeIsLike(target string, src interface{}) bool { + t := typeOf(src) + return target == t || "*"+target == t +} + +func typeOf(src interface{}) string { + return fmt.Sprintf("%T", src) +} + +func kindIs(target string, src interface{}) bool { + return target == kindOf(src) +} + +func kindOf(src interface{}) string { + return reflect.ValueOf(src).Kind().String() +} diff --git a/vendor/github.com/go-task/slim-sprig/regex.go b/vendor/github.com/go-task/slim-sprig/regex.go new file mode 100644 index 00000000..fab55101 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/regex.go @@ -0,0 +1,83 @@ +package sprig + +import ( + "regexp" +) + +func regexMatch(regex string, s string) bool { + match, _ := regexp.MatchString(regex, s) + return match +} + +func mustRegexMatch(regex string, s string) (bool, error) { + return regexp.MatchString(regex, s) +} + +func regexFindAll(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.FindAllString(s, n) +} + +func mustRegexFindAll(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.FindAllString(s, n), nil +} + +func regexFind(regex string, s string) string { + r := regexp.MustCompile(regex) + return r.FindString(s) +} + +func mustRegexFind(regex string, s string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.FindString(s), nil +} + +func regexReplaceAll(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllString(s, repl) +} + +func mustRegexReplaceAll(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllString(s, repl), nil +} + +func regexReplaceAllLiteral(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllLiteralString(s, repl) +} + +func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllLiteralString(s, repl), nil +} + +func regexSplit(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.Split(s, n) +} + +func mustRegexSplit(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.Split(s, n), nil +} + +func regexQuoteMeta(s string) string { + return regexp.QuoteMeta(s) +} diff --git a/vendor/github.com/go-task/slim-sprig/strings.go b/vendor/github.com/go-task/slim-sprig/strings.go new file mode 100644 index 00000000..3c62d6b6 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/strings.go @@ -0,0 +1,189 @@ +package sprig + +import ( + "encoding/base32" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "strings" +) + +func base64encode(v string) string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +func base64decode(v string) string { + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func base32encode(v string) string { + return base32.StdEncoding.EncodeToString([]byte(v)) +} + +func base32decode(v string) string { + data, err := base32.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func quote(str 
...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("%q", strval(s))) + } + } + return strings.Join(out, " ") +} + +func squote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("'%v'", s)) + } + } + return strings.Join(out, " ") +} + +func cat(v ...interface{}) string { + v = removeNilElements(v) + r := strings.TrimSpace(strings.Repeat("%v ", len(v))) + return fmt.Sprintf(r, v...) +} + +func indent(spaces int, v string) string { + pad := strings.Repeat(" ", spaces) + return pad + strings.Replace(v, "\n", "\n"+pad, -1) +} + +func nindent(spaces int, v string) string { + return "\n" + indent(spaces, v) +} + +func replace(old, new, src string) string { + return strings.Replace(src, old, new, -1) +} + +func plural(one, many string, count int) string { + if count == 1 { + return one + } + return many +} + +func strslice(v interface{}) []string { + switch v := v.(type) { + case []string: + return v + case []interface{}: + b := make([]string, 0, len(v)) + for _, s := range v { + if s != nil { + b = append(b, strval(s)) + } + } + return b + default: + val := reflect.ValueOf(v) + switch val.Kind() { + case reflect.Array, reflect.Slice: + l := val.Len() + b := make([]string, 0, l) + for i := 0; i < l; i++ { + value := val.Index(i).Interface() + if value != nil { + b = append(b, strval(value)) + } + } + return b + default: + if v == nil { + return []string{} + } + + return []string{strval(v)} + } + } +} + +func removeNilElements(v []interface{}) []interface{} { + newSlice := make([]interface{}, 0, len(v)) + for _, i := range v { + if i != nil { + newSlice = append(newSlice, i) + } + } + return newSlice +} + +func strval(v interface{}) string { + switch v := v.(type) { + case string: + return v + case []byte: + return string(v) + case error: + return v.Error() + case fmt.Stringer: + return v.String() + default: + return fmt.Sprintf("%v", v) + } +} + +func trunc(c int, s string) string { + if c < 0 && len(s)+c > 0 { + return s[len(s)+c:] + } + if c >= 0 && len(s) > c { + return s[:c] + } + return s +} + +func join(sep string, v interface{}) string { + return strings.Join(strslice(v), sep) +} + +func split(sep, orig string) map[string]string { + parts := strings.Split(orig, sep) + res := make(map[string]string, len(parts)) + for i, v := range parts { + res["_"+strconv.Itoa(i)] = v + } + return res +} + +func splitn(sep string, n int, orig string) map[string]string { + parts := strings.SplitN(orig, sep, n) + res := make(map[string]string, len(parts)) + for i, v := range parts { + res["_"+strconv.Itoa(i)] = v + } + return res +} + +// substring creates a substring of the given string. +// +// If start is < 0, this calls string[:end]. +// +// If start is >= 0 and end < 0 or end bigger than s length, this calls string[start:] +// +// Otherwise, this calls string[start, end]. 
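+//
+// For example, substring(0, 3, "hello") is "hel" and substring(1, -1, "hello")
+// is "ello". Indices are byte offsets, not rune offsets.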
+func substring(start, end int, s string) string { + if start < 0 { + return s[:end] + } + if end < 0 || end > len(s) { + return s[start:] + } + return s[start:end] +} diff --git a/vendor/github.com/go-task/slim-sprig/url.go b/vendor/github.com/go-task/slim-sprig/url.go new file mode 100644 index 00000000..b8e120e1 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/url.go @@ -0,0 +1,66 @@ +package sprig + +import ( + "fmt" + "net/url" + "reflect" +) + +func dictGetOrEmpty(dict map[string]interface{}, key string) string { + value, ok := dict[key] + if !ok { + return "" + } + tp := reflect.TypeOf(value).Kind() + if tp != reflect.String { + panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String())) + } + return reflect.ValueOf(value).String() +} + +// parses given URL to return dict object +func urlParse(v string) map[string]interface{} { + dict := map[string]interface{}{} + parsedURL, err := url.Parse(v) + if err != nil { + panic(fmt.Sprintf("unable to parse url: %s", err)) + } + dict["scheme"] = parsedURL.Scheme + dict["host"] = parsedURL.Host + dict["hostname"] = parsedURL.Hostname() + dict["path"] = parsedURL.Path + dict["query"] = parsedURL.RawQuery + dict["opaque"] = parsedURL.Opaque + dict["fragment"] = parsedURL.Fragment + if parsedURL.User != nil { + dict["userinfo"] = parsedURL.User.String() + } else { + dict["userinfo"] = "" + } + + return dict +} + +// join given dict to URL string +func urlJoin(d map[string]interface{}) string { + resURL := url.URL{ + Scheme: dictGetOrEmpty(d, "scheme"), + Host: dictGetOrEmpty(d, "host"), + Path: dictGetOrEmpty(d, "path"), + RawQuery: dictGetOrEmpty(d, "query"), + Opaque: dictGetOrEmpty(d, "opaque"), + Fragment: dictGetOrEmpty(d, "fragment"), + } + userinfo := dictGetOrEmpty(d, "userinfo") + var user *url.Userinfo + if userinfo != "" { + tempURL, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo)) + if err != nil { + panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err)) + } + user = tempURL.User + } + + resURL.User = user + return resURL.String() +} diff --git a/vendor/github.com/google/pprof/AUTHORS b/vendor/github.com/google/pprof/AUTHORS new file mode 100644 index 00000000..fd736cb1 --- /dev/null +++ b/vendor/github.com/google/pprof/AUTHORS @@ -0,0 +1,7 @@ +# This is the official list of pprof authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. +# Names should be added to this file as: +# Name or Organization +# The email address is not required for organizations. +Google Inc. \ No newline at end of file diff --git a/vendor/github.com/google/pprof/CONTRIBUTORS b/vendor/github.com/google/pprof/CONTRIBUTORS new file mode 100644 index 00000000..8c8c37d2 --- /dev/null +++ b/vendor/github.com/google/pprof/CONTRIBUTORS @@ -0,0 +1,16 @@ +# People who have agreed to one of the CLAs and can contribute patches. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. 
+# +# https://developers.google.com/open-source/cla/individual +# https://developers.google.com/open-source/cla/corporate +# +# Names should be added to this file as: +# Name +Raul Silvera +Tipp Moseley +Hyoun Kyu Cho +Martin Spier +Taco de Wolff +Andrew Hunter diff --git a/vendor/github.com/google/pprof/LICENSE b/vendor/github.com/google/pprof/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/google/pprof/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go new file mode 100644 index 00000000..c8a1beb8 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/encode.go @@ -0,0 +1,588 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "errors" + "sort" + "strings" +) + +func (p *Profile) decoder() []decoder { + return profileDecoder +} + +// preEncode populates the unexported fields to be used by encode +// (with suffix X) from the corresponding exported fields. The +// exported fields are cleared up to facilitate testing. 
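+//
+// For example, a profile whose only interned strings are "cpu" and
+// "nanoseconds" ends up with stringTable = ["", "cpu", "nanoseconds"], with
+// the corresponding typeX/unitX fields holding the indexes 1 and 2; index 0
+// is always the empty string.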
+func (p *Profile) preEncode() { + strings := make(map[string]int) + addString(strings, "") + + for _, st := range p.SampleType { + st.typeX = addString(strings, st.Type) + st.unitX = addString(strings, st.Unit) + } + + for _, s := range p.Sample { + s.labelX = nil + var keys []string + for k := range s.Label { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + vs := s.Label[k] + for _, v := range vs { + s.labelX = append(s.labelX, + label{ + keyX: addString(strings, k), + strX: addString(strings, v), + }, + ) + } + } + var numKeys []string + for k := range s.NumLabel { + numKeys = append(numKeys, k) + } + sort.Strings(numKeys) + for _, k := range numKeys { + keyX := addString(strings, k) + vs := s.NumLabel[k] + units := s.NumUnit[k] + for i, v := range vs { + var unitX int64 + if len(units) != 0 { + unitX = addString(strings, units[i]) + } + s.labelX = append(s.labelX, + label{ + keyX: keyX, + numX: v, + unitX: unitX, + }, + ) + } + } + s.locationIDX = make([]uint64, len(s.Location)) + for i, loc := range s.Location { + s.locationIDX[i] = loc.ID + } + } + + for _, m := range p.Mapping { + m.fileX = addString(strings, m.File) + m.buildIDX = addString(strings, m.BuildID) + } + + for _, l := range p.Location { + for i, ln := range l.Line { + if ln.Function != nil { + l.Line[i].functionIDX = ln.Function.ID + } else { + l.Line[i].functionIDX = 0 + } + } + if l.Mapping != nil { + l.mappingIDX = l.Mapping.ID + } else { + l.mappingIDX = 0 + } + } + for _, f := range p.Function { + f.nameX = addString(strings, f.Name) + f.systemNameX = addString(strings, f.SystemName) + f.filenameX = addString(strings, f.Filename) + } + + p.dropFramesX = addString(strings, p.DropFrames) + p.keepFramesX = addString(strings, p.KeepFrames) + + if pt := p.PeriodType; pt != nil { + pt.typeX = addString(strings, pt.Type) + pt.unitX = addString(strings, pt.Unit) + } + + p.commentX = nil + for _, c := range p.Comments { + p.commentX = append(p.commentX, addString(strings, c)) + } + + p.defaultSampleTypeX = addString(strings, p.DefaultSampleType) + + p.stringTable = make([]string, len(strings)) + for s, i := range strings { + p.stringTable[i] = s + } +} + +func (p *Profile) encode(b *buffer) { + for _, x := range p.SampleType { + encodeMessage(b, 1, x) + } + for _, x := range p.Sample { + encodeMessage(b, 2, x) + } + for _, x := range p.Mapping { + encodeMessage(b, 3, x) + } + for _, x := range p.Location { + encodeMessage(b, 4, x) + } + for _, x := range p.Function { + encodeMessage(b, 5, x) + } + encodeStrings(b, 6, p.stringTable) + encodeInt64Opt(b, 7, p.dropFramesX) + encodeInt64Opt(b, 8, p.keepFramesX) + encodeInt64Opt(b, 9, p.TimeNanos) + encodeInt64Opt(b, 10, p.DurationNanos) + if pt := p.PeriodType; pt != nil && (pt.typeX != 0 || pt.unitX != 0) { + encodeMessage(b, 11, p.PeriodType) + } + encodeInt64Opt(b, 12, p.Period) + encodeInt64s(b, 13, p.commentX) + encodeInt64(b, 14, p.defaultSampleTypeX) +} + +var profileDecoder = []decoder{ + nil, // 0 + // repeated ValueType sample_type = 1 + func(b *buffer, m message) error { + x := new(ValueType) + pp := m.(*Profile) + pp.SampleType = append(pp.SampleType, x) + return decodeMessage(b, x) + }, + // repeated Sample sample = 2 + func(b *buffer, m message) error { + x := new(Sample) + pp := m.(*Profile) + pp.Sample = append(pp.Sample, x) + return decodeMessage(b, x) + }, + // repeated Mapping mapping = 3 + func(b *buffer, m message) error { + x := new(Mapping) + pp := m.(*Profile) + pp.Mapping = append(pp.Mapping, x) + return decodeMessage(b, x) 
+ }, + // repeated Location location = 4 + func(b *buffer, m message) error { + x := new(Location) + x.Line = b.tmpLines[:0] // Use shared space temporarily + pp := m.(*Profile) + pp.Location = append(pp.Location, x) + err := decodeMessage(b, x) + b.tmpLines = x.Line[:0] + // Copy to shrink size and detach from shared space. + x.Line = append([]Line(nil), x.Line...) + return err + }, + // repeated Function function = 5 + func(b *buffer, m message) error { + x := new(Function) + pp := m.(*Profile) + pp.Function = append(pp.Function, x) + return decodeMessage(b, x) + }, + // repeated string string_table = 6 + func(b *buffer, m message) error { + err := decodeStrings(b, &m.(*Profile).stringTable) + if err != nil { + return err + } + if m.(*Profile).stringTable[0] != "" { + return errors.New("string_table[0] must be ''") + } + return nil + }, + // int64 drop_frames = 7 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).dropFramesX) }, + // int64 keep_frames = 8 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).keepFramesX) }, + // int64 time_nanos = 9 + func(b *buffer, m message) error { + if m.(*Profile).TimeNanos != 0 { + return errConcatProfile + } + return decodeInt64(b, &m.(*Profile).TimeNanos) + }, + // int64 duration_nanos = 10 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).DurationNanos) }, + // ValueType period_type = 11 + func(b *buffer, m message) error { + x := new(ValueType) + pp := m.(*Profile) + pp.PeriodType = x + return decodeMessage(b, x) + }, + // int64 period = 12 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).Period) }, + // repeated int64 comment = 13 + func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) }, + // int64 defaultSampleType = 14 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) }, +} + +// postDecode takes the unexported fields populated by decode (with +// suffix X) and populates the corresponding exported fields. +// The unexported fields are cleared up to facilitate testing. +func (p *Profile) postDecode() error { + var err error + mappings := make(map[uint64]*Mapping, len(p.Mapping)) + mappingIds := make([]*Mapping, len(p.Mapping)+1) + for _, m := range p.Mapping { + m.File, err = getString(p.stringTable, &m.fileX, err) + m.BuildID, err = getString(p.stringTable, &m.buildIDX, err) + if m.ID < uint64(len(mappingIds)) { + mappingIds[m.ID] = m + } else { + mappings[m.ID] = m + } + + // If this a main linux kernel mapping with a relocation symbol suffix + // ("[kernel.kallsyms]_text"), extract said suffix. + // It is fairly hacky to handle at this level, but the alternatives appear even worse. 
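+		// For example, a File of "[kernel.kallsyms]_text" yields a
+		// KernelRelocationSymbol of "_text".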
+ if strings.HasPrefix(m.File, "[kernel.kallsyms]") { + m.KernelRelocationSymbol = strings.ReplaceAll(m.File, "[kernel.kallsyms]", "") + } + + } + + functions := make(map[uint64]*Function, len(p.Function)) + functionIds := make([]*Function, len(p.Function)+1) + for _, f := range p.Function { + f.Name, err = getString(p.stringTable, &f.nameX, err) + f.SystemName, err = getString(p.stringTable, &f.systemNameX, err) + f.Filename, err = getString(p.stringTable, &f.filenameX, err) + if f.ID < uint64(len(functionIds)) { + functionIds[f.ID] = f + } else { + functions[f.ID] = f + } + } + + locations := make(map[uint64]*Location, len(p.Location)) + locationIds := make([]*Location, len(p.Location)+1) + for _, l := range p.Location { + if id := l.mappingIDX; id < uint64(len(mappingIds)) { + l.Mapping = mappingIds[id] + } else { + l.Mapping = mappings[id] + } + l.mappingIDX = 0 + for i, ln := range l.Line { + if id := ln.functionIDX; id != 0 { + l.Line[i].functionIDX = 0 + if id < uint64(len(functionIds)) { + l.Line[i].Function = functionIds[id] + } else { + l.Line[i].Function = functions[id] + } + } + } + if l.ID < uint64(len(locationIds)) { + locationIds[l.ID] = l + } else { + locations[l.ID] = l + } + } + + for _, st := range p.SampleType { + st.Type, err = getString(p.stringTable, &st.typeX, err) + st.Unit, err = getString(p.stringTable, &st.unitX, err) + } + + // Pre-allocate space for all locations. + numLocations := 0 + for _, s := range p.Sample { + numLocations += len(s.locationIDX) + } + locBuffer := make([]*Location, numLocations) + + for _, s := range p.Sample { + if len(s.labelX) > 0 { + labels := make(map[string][]string, len(s.labelX)) + numLabels := make(map[string][]int64, len(s.labelX)) + numUnits := make(map[string][]string, len(s.labelX)) + for _, l := range s.labelX { + var key, value string + key, err = getString(p.stringTable, &l.keyX, err) + if l.strX != 0 { + value, err = getString(p.stringTable, &l.strX, err) + labels[key] = append(labels[key], value) + } else if l.numX != 0 || l.unitX != 0 { + numValues := numLabels[key] + units := numUnits[key] + if l.unitX != 0 { + var unit string + unit, err = getString(p.stringTable, &l.unitX, err) + units = padStringArray(units, len(numValues)) + numUnits[key] = append(units, unit) + } + numLabels[key] = append(numLabels[key], l.numX) + } + } + if len(labels) > 0 { + s.Label = labels + } + if len(numLabels) > 0 { + s.NumLabel = numLabels + for key, units := range numUnits { + if len(units) > 0 { + numUnits[key] = padStringArray(units, len(numLabels[key])) + } + } + s.NumUnit = numUnits + } + } + + s.Location = locBuffer[:len(s.locationIDX)] + locBuffer = locBuffer[len(s.locationIDX):] + for i, lid := range s.locationIDX { + if lid < uint64(len(locationIds)) { + s.Location[i] = locationIds[lid] + } else { + s.Location[i] = locations[lid] + } + } + s.locationIDX = nil + } + + p.DropFrames, err = getString(p.stringTable, &p.dropFramesX, err) + p.KeepFrames, err = getString(p.stringTable, &p.keepFramesX, err) + + if pt := p.PeriodType; pt == nil { + p.PeriodType = &ValueType{} + } + + if pt := p.PeriodType; pt != nil { + pt.Type, err = getString(p.stringTable, &pt.typeX, err) + pt.Unit, err = getString(p.stringTable, &pt.unitX, err) + } + + for _, i := range p.commentX { + var c string + c, err = getString(p.stringTable, &i, err) + p.Comments = append(p.Comments, c) + } + + p.commentX = nil + p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err) + p.stringTable = nil + return err +} + +// padStringArray pads arr 
with enough empty strings to make arr +// length l when arr's length is less than l. +func padStringArray(arr []string, l int) []string { + if l <= len(arr) { + return arr + } + return append(arr, make([]string, l-len(arr))...) +} + +func (p *ValueType) decoder() []decoder { + return valueTypeDecoder +} + +func (p *ValueType) encode(b *buffer) { + encodeInt64Opt(b, 1, p.typeX) + encodeInt64Opt(b, 2, p.unitX) +} + +var valueTypeDecoder = []decoder{ + nil, // 0 + // optional int64 type = 1 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).typeX) }, + // optional int64 unit = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).unitX) }, +} + +func (p *Sample) decoder() []decoder { + return sampleDecoder +} + +func (p *Sample) encode(b *buffer) { + encodeUint64s(b, 1, p.locationIDX) + encodeInt64s(b, 2, p.Value) + for _, x := range p.labelX { + encodeMessage(b, 3, x) + } +} + +var sampleDecoder = []decoder{ + nil, // 0 + // repeated uint64 location = 1 + func(b *buffer, m message) error { return decodeUint64s(b, &m.(*Sample).locationIDX) }, + // repeated int64 value = 2 + func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Sample).Value) }, + // repeated Label label = 3 + func(b *buffer, m message) error { + s := m.(*Sample) + n := len(s.labelX) + s.labelX = append(s.labelX, label{}) + return decodeMessage(b, &s.labelX[n]) + }, +} + +func (p label) decoder() []decoder { + return labelDecoder +} + +func (p label) encode(b *buffer) { + encodeInt64Opt(b, 1, p.keyX) + encodeInt64Opt(b, 2, p.strX) + encodeInt64Opt(b, 3, p.numX) + encodeInt64Opt(b, 4, p.unitX) +} + +var labelDecoder = []decoder{ + nil, // 0 + // optional int64 key = 1 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).keyX) }, + // optional int64 str = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).strX) }, + // optional int64 num = 3 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).numX) }, + // optional int64 num = 4 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).unitX) }, +} + +func (p *Mapping) decoder() []decoder { + return mappingDecoder +} + +func (p *Mapping) encode(b *buffer) { + encodeUint64Opt(b, 1, p.ID) + encodeUint64Opt(b, 2, p.Start) + encodeUint64Opt(b, 3, p.Limit) + encodeUint64Opt(b, 4, p.Offset) + encodeInt64Opt(b, 5, p.fileX) + encodeInt64Opt(b, 6, p.buildIDX) + encodeBoolOpt(b, 7, p.HasFunctions) + encodeBoolOpt(b, 8, p.HasFilenames) + encodeBoolOpt(b, 9, p.HasLineNumbers) + encodeBoolOpt(b, 10, p.HasInlineFrames) +} + +var mappingDecoder = []decoder{ + nil, // 0 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).ID) }, // optional uint64 id = 1 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Start) }, // optional uint64 memory_offset = 2 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Limit) }, // optional uint64 memory_limit = 3 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Offset) }, // optional uint64 file_offset = 4 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).fileX) }, // optional int64 filename = 5 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).buildIDX) }, // optional int64 build_id = 6 + func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFunctions) }, // optional bool has_functions = 7 + func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFilenames) }, // 
optional bool has_filenames = 8 + func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasLineNumbers) }, // optional bool has_line_numbers = 9 + func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasInlineFrames) }, // optional bool has_inline_frames = 10 +} + +func (p *Location) decoder() []decoder { + return locationDecoder +} + +func (p *Location) encode(b *buffer) { + encodeUint64Opt(b, 1, p.ID) + encodeUint64Opt(b, 2, p.mappingIDX) + encodeUint64Opt(b, 3, p.Address) + for i := range p.Line { + encodeMessage(b, 4, &p.Line[i]) + } + encodeBoolOpt(b, 5, p.IsFolded) +} + +var locationDecoder = []decoder{ + nil, // 0 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).ID) }, // optional uint64 id = 1; + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).mappingIDX) }, // optional uint64 mapping_id = 2; + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).Address) }, // optional uint64 address = 3; + func(b *buffer, m message) error { // repeated Line line = 4 + pp := m.(*Location) + n := len(pp.Line) + pp.Line = append(pp.Line, Line{}) + return decodeMessage(b, &pp.Line[n]) + }, + func(b *buffer, m message) error { return decodeBool(b, &m.(*Location).IsFolded) }, // optional bool is_folded = 5; +} + +func (p *Line) decoder() []decoder { + return lineDecoder +} + +func (p *Line) encode(b *buffer) { + encodeUint64Opt(b, 1, p.functionIDX) + encodeInt64Opt(b, 2, p.Line) +} + +var lineDecoder = []decoder{ + nil, // 0 + // optional uint64 function_id = 1 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) }, + // optional int64 line = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) }, +} + +func (p *Function) decoder() []decoder { + return functionDecoder +} + +func (p *Function) encode(b *buffer) { + encodeUint64Opt(b, 1, p.ID) + encodeInt64Opt(b, 2, p.nameX) + encodeInt64Opt(b, 3, p.systemNameX) + encodeInt64Opt(b, 4, p.filenameX) + encodeInt64Opt(b, 5, p.StartLine) +} + +var functionDecoder = []decoder{ + nil, // 0 + // optional uint64 id = 1 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Function).ID) }, + // optional int64 function_name = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).nameX) }, + // optional int64 function_system_name = 3 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).systemNameX) }, + // repeated int64 filename = 4 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).filenameX) }, + // optional int64 start_line = 5 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).StartLine) }, +} + +func addString(strings map[string]int, s string) int64 { + i, ok := strings[s] + if !ok { + i = len(strings) + strings[s] = i + } + return int64(i) +} + +func getString(strings []string, strng *int64, err error) (string, error) { + if err != nil { + return "", err + } + s := int(*strng) + if s < 0 || s >= len(strings) { + return "", errMalformed + } + *strng = 0 + return strings[s], nil +} diff --git a/vendor/github.com/google/pprof/profile/filter.go b/vendor/github.com/google/pprof/profile/filter.go new file mode 100644 index 00000000..c794b939 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/filter.go @@ -0,0 +1,274 @@ +// Copyright 2014 Google Inc. All Rights Reserved. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package profile
+
+// Implements methods to filter samples from profiles.
+
+import "regexp"
+
+// FilterSamplesByName filters the samples in a profile and only keeps
+// samples where at least one frame matches focus but none match ignore.
+// Returns true if the corresponding regexp matched at least one sample.
+func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) {
+	if focus == nil && ignore == nil && hide == nil && show == nil {
+		fm = true // Missing focus implies a match
+		return
+	}
+	focusOrIgnore := make(map[uint64]bool)
+	hidden := make(map[uint64]bool)
+	for _, l := range p.Location {
+		if ignore != nil && l.matchesName(ignore) {
+			im = true
+			focusOrIgnore[l.ID] = false
+		} else if focus == nil || l.matchesName(focus) {
+			fm = true
+			focusOrIgnore[l.ID] = true
+		}
+
+		if hide != nil && l.matchesName(hide) {
+			hm = true
+			l.Line = l.unmatchedLines(hide)
+			if len(l.Line) == 0 {
+				hidden[l.ID] = true
+			}
+		}
+		if show != nil {
+			l.Line = l.matchedLines(show)
+			if len(l.Line) == 0 {
+				hidden[l.ID] = true
+			} else {
+				hnm = true
+			}
+		}
+	}
+
+	s := make([]*Sample, 0, len(p.Sample))
+	for _, sample := range p.Sample {
+		if focusedAndNotIgnored(sample.Location, focusOrIgnore) {
+			if len(hidden) > 0 {
+				var locs []*Location
+				for _, loc := range sample.Location {
+					if !hidden[loc.ID] {
+						locs = append(locs, loc)
+					}
+				}
+				if len(locs) == 0 {
+					// Remove sample with no locations (by not adding it to s).
+					continue
+				}
+				sample.Location = locs
+			}
+			s = append(s, sample)
+		}
+	}
+	p.Sample = s
+
+	return
+}
+
+// ShowFrom drops all stack frames above the highest matching frame and returns
+// whether a match was found. If showFrom is nil it returns false and does not
+// modify the profile.
+//
+// Example: consider a sample with frames [A, B, C, B], where A is the root.
+// ShowFrom(nil) returns false and has frames [A, B, C, B].
+// ShowFrom(A) returns true and has frames [A, B, C, B].
+// ShowFrom(B) returns true and has frames [B, C, B].
+// ShowFrom(C) returns true and has frames [C, B].
+// ShowFrom(D) returns false and drops the sample because no frames remain.
+func (p *Profile) ShowFrom(showFrom *regexp.Regexp) (matched bool) {
+	if showFrom == nil {
+		return false
+	}
+	// showFromLocs stores location IDs that matched ShowFrom.
+	showFromLocs := make(map[uint64]bool)
+	// Apply to locations.
+	for _, loc := range p.Location {
+		if filterShowFromLocation(loc, showFrom) {
+			showFromLocs[loc.ID] = true
+			matched = true
+		}
+	}
+	// For all samples, strip locations after the highest matching one.
+ s := make([]*Sample, 0, len(p.Sample)) + for _, sample := range p.Sample { + for i := len(sample.Location) - 1; i >= 0; i-- { + if showFromLocs[sample.Location[i].ID] { + sample.Location = sample.Location[:i+1] + s = append(s, sample) + break + } + } + } + p.Sample = s + return matched +} + +// filterShowFromLocation tests a showFrom regex against a location, removes +// lines after the last match and returns whether a match was found. If the +// mapping is matched, then all lines are kept. +func filterShowFromLocation(loc *Location, showFrom *regexp.Regexp) bool { + if m := loc.Mapping; m != nil && showFrom.MatchString(m.File) { + return true + } + if i := loc.lastMatchedLineIndex(showFrom); i >= 0 { + loc.Line = loc.Line[:i+1] + return true + } + return false +} + +// lastMatchedLineIndex returns the index of the last line that matches a regex, +// or -1 if no match is found. +func (loc *Location) lastMatchedLineIndex(re *regexp.Regexp) int { + for i := len(loc.Line) - 1; i >= 0; i-- { + if fn := loc.Line[i].Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + return i + } + } + } + return -1 +} + +// FilterTagsByName filters the tags in a profile and only keeps +// tags that match show and not hide. +func (p *Profile) FilterTagsByName(show, hide *regexp.Regexp) (sm, hm bool) { + matchRemove := func(name string) bool { + matchShow := show == nil || show.MatchString(name) + matchHide := hide != nil && hide.MatchString(name) + + if matchShow { + sm = true + } + if matchHide { + hm = true + } + return !matchShow || matchHide + } + for _, s := range p.Sample { + for lab := range s.Label { + if matchRemove(lab) { + delete(s.Label, lab) + } + } + for lab := range s.NumLabel { + if matchRemove(lab) { + delete(s.NumLabel, lab) + } + } + } + return +} + +// matchesName returns whether the location matches the regular +// expression. It checks any available function names, file names, and +// mapping object filename. +func (loc *Location) matchesName(re *regexp.Regexp) bool { + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + return true + } + } + } + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return true + } + return false +} + +// unmatchedLines returns the lines in the location that do not match +// the regular expression. +func (loc *Location) unmatchedLines(re *regexp.Regexp) []Line { + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return nil + } + var lines []Line + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + continue + } + } + lines = append(lines, ln) + } + return lines +} + +// matchedLines returns the lines in the location that match +// the regular expression. +func (loc *Location) matchedLines(re *regexp.Regexp) []Line { + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return loc.Line + } + var lines []Line + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if !re.MatchString(fn.Name) && !re.MatchString(fn.Filename) { + continue + } + } + lines = append(lines, ln) + } + return lines +} + +// focusedAndNotIgnored looks up a slice of ids against a map of +// focused/ignored locations. The map only contains locations that are +// explicitly focused or ignored. Returns whether there is at least +// one focused location but no ignored locations. 
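+//
+// For example, given m = map[uint64]bool{1: true, 2: false}: IDs [1] yield
+// true (a focused location and nothing ignored), IDs [1, 2] yield false
+// (2 is ignored), and IDs [3] yield false (no focused location was found).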
+func focusedAndNotIgnored(locs []*Location, m map[uint64]bool) bool { + var f bool + for _, loc := range locs { + if focus, focusOrIgnore := m[loc.ID]; focusOrIgnore { + if focus { + // Found focused location. Must keep searching in case there + // is an ignored one as well. + f = true + } else { + // Found ignored location. Can return false right away. + return false + } + } + } + return f +} + +// TagMatch selects tags for filtering +type TagMatch func(s *Sample) bool + +// FilterSamplesByTag removes all samples from the profile, except +// those that match focus and do not match the ignore regular +// expression. +func (p *Profile) FilterSamplesByTag(focus, ignore TagMatch) (fm, im bool) { + samples := make([]*Sample, 0, len(p.Sample)) + for _, s := range p.Sample { + focused, ignored := true, false + if focus != nil { + focused = focus(s) + } + if ignore != nil { + ignored = ignore(s) + } + fm = fm || focused + im = im || ignored + if focused && !ignored { + samples = append(samples, s) + } + } + p.Sample = samples + return +} diff --git a/vendor/github.com/google/pprof/profile/index.go b/vendor/github.com/google/pprof/profile/index.go new file mode 100644 index 00000000..bef1d604 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/index.go @@ -0,0 +1,64 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "fmt" + "strconv" + "strings" +) + +// SampleIndexByName returns the appropriate index for a value of sample index. +// If numeric, it returns the number, otherwise it looks up the text in the +// profile sample types. +func (p *Profile) SampleIndexByName(sampleIndex string) (int, error) { + if sampleIndex == "" { + if dst := p.DefaultSampleType; dst != "" { + for i, t := range sampleTypes(p) { + if t == dst { + return i, nil + } + } + } + // By default select the last sample value + return len(p.SampleType) - 1, nil + } + if i, err := strconv.Atoi(sampleIndex); err == nil { + if i < 0 || i >= len(p.SampleType) { + return 0, fmt.Errorf("sample_index %s is outside the range [0..%d]", sampleIndex, len(p.SampleType)-1) + } + return i, nil + } + + // Remove the inuse_ prefix to support legacy pprof options + // "inuse_space" and "inuse_objects" for profiles containing types + // "space" and "objects". 
+ noInuse := strings.TrimPrefix(sampleIndex, "inuse_") + for i, t := range p.SampleType { + if t.Type == sampleIndex || t.Type == noInuse { + return i, nil + } + } + + return 0, fmt.Errorf("sample_index %q must be one of: %v", sampleIndex, sampleTypes(p)) +} + +func sampleTypes(p *Profile) []string { + types := make([]string, len(p.SampleType)) + for i, t := range p.SampleType { + types[i] = t.Type + } + return types +} diff --git a/vendor/github.com/google/pprof/profile/legacy_java_profile.go b/vendor/github.com/google/pprof/profile/legacy_java_profile.go new file mode 100644 index 00000000..91f45e53 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/legacy_java_profile.go @@ -0,0 +1,315 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file implements parsers to convert java legacy profiles into +// the profile.proto format. + +package profile + +import ( + "bytes" + "fmt" + "io" + "path/filepath" + "regexp" + "strconv" + "strings" +) + +var ( + attributeRx = regexp.MustCompile(`([\w ]+)=([\w ]+)`) + javaSampleRx = regexp.MustCompile(` *(\d+) +(\d+) +@ +([ x0-9a-f]*)`) + javaLocationRx = regexp.MustCompile(`^\s*0x([[:xdigit:]]+)\s+(.*)\s*$`) + javaLocationFileLineRx = regexp.MustCompile(`^(.*)\s+\((.+):(-?[[:digit:]]+)\)$`) + javaLocationPathRx = regexp.MustCompile(`^(.*)\s+\((.*)\)$`) +) + +// javaCPUProfile returns a new Profile from profilez data. +// b is the profile bytes after the header, period is the profiling +// period, and parse is a function to parse 8-byte chunks from the +// profile in its native endianness. +func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) { + p := &Profile{ + Period: period * 1000, + PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"}, + SampleType: []*ValueType{{Type: "samples", Unit: "count"}, {Type: "cpu", Unit: "nanoseconds"}}, + } + var err error + var locs map[uint64]*Location + if b, locs, err = parseCPUSamples(b, parse, false, p); err != nil { + return nil, err + } + + if err = parseJavaLocations(b, locs, p); err != nil { + return nil, err + } + + // Strip out addresses for better merge. + if err = p.Aggregate(true, true, true, true, false); err != nil { + return nil, err + } + + return p, nil +} + +// parseJavaProfile returns a new profile from heapz or contentionz +// data. b is the profile bytes after the header. 
+func parseJavaProfile(b []byte) (*Profile, error) { + h := bytes.SplitAfterN(b, []byte("\n"), 2) + if len(h) < 2 { + return nil, errUnrecognized + } + + p := &Profile{ + PeriodType: &ValueType{}, + } + header := string(bytes.TrimSpace(h[0])) + + var err error + var pType string + switch header { + case "--- heapz 1 ---": + pType = "heap" + case "--- contentionz 1 ---": + pType = "contention" + default: + return nil, errUnrecognized + } + + if b, err = parseJavaHeader(pType, h[1], p); err != nil { + return nil, err + } + var locs map[uint64]*Location + if b, locs, err = parseJavaSamples(pType, b, p); err != nil { + return nil, err + } + if err = parseJavaLocations(b, locs, p); err != nil { + return nil, err + } + + // Strip out addresses for better merge. + if err = p.Aggregate(true, true, true, true, false); err != nil { + return nil, err + } + + return p, nil +} + +// parseJavaHeader parses the attribute section on a java profile and +// populates a profile. Returns the remainder of the buffer after all +// attributes. +func parseJavaHeader(pType string, b []byte, p *Profile) ([]byte, error) { + nextNewLine := bytes.IndexByte(b, byte('\n')) + for nextNewLine != -1 { + line := string(bytes.TrimSpace(b[0:nextNewLine])) + if line != "" { + h := attributeRx.FindStringSubmatch(line) + if h == nil { + // Not a valid attribute, exit. + return b, nil + } + + attribute, value := strings.TrimSpace(h[1]), strings.TrimSpace(h[2]) + var err error + switch pType + "/" + attribute { + case "heap/format", "cpu/format", "contention/format": + if value != "java" { + return nil, errUnrecognized + } + case "heap/resolution": + p.SampleType = []*ValueType{ + {Type: "inuse_objects", Unit: "count"}, + {Type: "inuse_space", Unit: value}, + } + case "contention/resolution": + p.SampleType = []*ValueType{ + {Type: "contentions", Unit: "count"}, + {Type: "delay", Unit: value}, + } + case "contention/sampling period": + p.PeriodType = &ValueType{ + Type: "contentions", Unit: "count", + } + if p.Period, err = strconv.ParseInt(value, 0, 64); err != nil { + return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err) + } + case "contention/ms since reset": + millis, err := strconv.ParseInt(value, 0, 64) + if err != nil { + return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err) + } + p.DurationNanos = millis * 1000 * 1000 + default: + return nil, errUnrecognized + } + } + // Grab next line. + b = b[nextNewLine+1:] + nextNewLine = bytes.IndexByte(b, byte('\n')) + } + return b, nil +} + +// parseJavaSamples parses the samples from a java profile and +// populates the Samples in a profile. Returns the remainder of the +// buffer after the samples. +func parseJavaSamples(pType string, b []byte, p *Profile) ([]byte, map[uint64]*Location, error) { + nextNewLine := bytes.IndexByte(b, byte('\n')) + locs := make(map[uint64]*Location) + for nextNewLine != -1 { + line := string(bytes.TrimSpace(b[0:nextNewLine])) + if line != "" { + sample := javaSampleRx.FindStringSubmatch(line) + if sample == nil { + // Not a valid sample, exit. + return b, locs, nil + } + + // Java profiles have data/fields inverted compared to other + // profile types. 
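+			// For a line such as "5 120 @ 0x1234", the regexp groups give
+			// value1="120", value2="5", and value3="0x1234".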
+ var err error + value1, value2, value3 := sample[2], sample[1], sample[3] + addrs, err := parseHexAddresses(value3) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + var sloc []*Location + for _, addr := range addrs { + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + s := &Sample{ + Value: make([]int64, 2), + Location: sloc, + } + + if s.Value[0], err = strconv.ParseInt(value1, 0, 64); err != nil { + return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err) + } + if s.Value[1], err = strconv.ParseInt(value2, 0, 64); err != nil { + return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err) + } + + switch pType { + case "heap": + const javaHeapzSamplingRate = 524288 // 512K + if s.Value[0] == 0 { + return nil, nil, fmt.Errorf("parsing sample %s: second value must be non-zero", line) + } + s.NumLabel = map[string][]int64{"bytes": {s.Value[1] / s.Value[0]}} + s.Value[0], s.Value[1] = scaleHeapSample(s.Value[0], s.Value[1], javaHeapzSamplingRate) + case "contention": + if period := p.Period; period != 0 { + s.Value[0] = s.Value[0] * p.Period + s.Value[1] = s.Value[1] * p.Period + } + } + p.Sample = append(p.Sample, s) + } + // Grab next line. + b = b[nextNewLine+1:] + nextNewLine = bytes.IndexByte(b, byte('\n')) + } + return b, locs, nil +} + +// parseJavaLocations parses the location information in a java +// profile and populates the Locations in a profile. It uses the +// location addresses from the profile as both the ID of each +// location. +func parseJavaLocations(b []byte, locs map[uint64]*Location, p *Profile) error { + r := bytes.NewBuffer(b) + fns := make(map[string]*Function) + for { + line, err := r.ReadString('\n') + if err != nil { + if err != io.EOF { + return err + } + if line == "" { + break + } + } + + if line = strings.TrimSpace(line); line == "" { + continue + } + + jloc := javaLocationRx.FindStringSubmatch(line) + if len(jloc) != 3 { + continue + } + addr, err := strconv.ParseUint(jloc[1], 16, 64) + if err != nil { + return fmt.Errorf("parsing sample %s: %v", line, err) + } + loc := locs[addr] + if loc == nil { + // Unused/unseen + continue + } + var lineFunc, lineFile string + var lineNo int64 + + if fileLine := javaLocationFileLineRx.FindStringSubmatch(jloc[2]); len(fileLine) == 4 { + // Found a line of the form: "function (file:line)" + lineFunc, lineFile = fileLine[1], fileLine[2] + if n, err := strconv.ParseInt(fileLine[3], 10, 64); err == nil && n > 0 { + lineNo = n + } + } else if filePath := javaLocationPathRx.FindStringSubmatch(jloc[2]); len(filePath) == 3 { + // If there's not a file:line, it's a shared library path. + // The path isn't interesting, so just give the .so. + lineFunc, lineFile = filePath[1], filepath.Base(filePath[2]) + } else if strings.Contains(jloc[2], "generated stub/JIT") { + lineFunc = "STUB" + } else { + // Treat whole line as the function name. This is used by the + // java agent for internal states such as "GC" or "VM". 
+ lineFunc = jloc[2] + } + fn := fns[lineFunc] + + if fn == nil { + fn = &Function{ + Name: lineFunc, + SystemName: lineFunc, + Filename: lineFile, + } + fns[lineFunc] = fn + p.Function = append(p.Function, fn) + } + loc.Line = []Line{ + { + Function: fn, + Line: lineNo, + }, + } + loc.Address = 0 + } + + p.remapLocationIDs() + p.remapFunctionIDs() + p.remapMappingIDs() + + return nil +} diff --git a/vendor/github.com/google/pprof/profile/legacy_profile.go b/vendor/github.com/google/pprof/profile/legacy_profile.go new file mode 100644 index 00000000..8d07fd6c --- /dev/null +++ b/vendor/github.com/google/pprof/profile/legacy_profile.go @@ -0,0 +1,1228 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file implements parsers to convert legacy profiles into the +// profile.proto format. + +package profile + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "regexp" + "strconv" + "strings" +) + +var ( + countStartRE = regexp.MustCompile(`\A(\S+) profile: total \d+\z`) + countRE = regexp.MustCompile(`\A(\d+) @(( 0x[0-9a-f]+)+)\z`) + + heapHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] *@ *(heap[_a-z0-9]*)/?(\d*)`) + heapSampleRE = regexp.MustCompile(`(-?\d+): *(-?\d+) *\[ *(\d+): *(\d+) *] @([ x0-9a-f]*)`) + + contentionSampleRE = regexp.MustCompile(`(\d+) *(\d+) @([ x0-9a-f]*)`) + + hexNumberRE = regexp.MustCompile(`0x[0-9a-f]+`) + + growthHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ growthz?`) + + fragmentationHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ fragmentationz?`) + + threadzStartRE = regexp.MustCompile(`--- threadz \d+ ---`) + threadStartRE = regexp.MustCompile(`--- Thread ([[:xdigit:]]+) \(name: (.*)/(\d+)\) stack: ---`) + + // Regular expressions to parse process mappings. Support the format used by Linux /proc/.../maps and other tools. + // Recommended format: + // Start End object file name offset(optional) linker build id + // 0x40000-0x80000 /path/to/binary (@FF00) abc123456 + spaceDigits = `\s+[[:digit:]]+` + hexPair = `\s+[[:xdigit:]]+:[[:xdigit:]]+` + oSpace = `\s*` + // Capturing expressions. + cHex = `(?:0x)?([[:xdigit:]]+)` + cHexRange = `\s*` + cHex + `[\s-]?` + oSpace + cHex + `:?` + cSpaceString = `(?:\s+(\S+))?` + cSpaceHex = `(?:\s+([[:xdigit:]]+))?` + cSpaceAtOffset = `(?:\s+\(@([[:xdigit:]]+)\))?` + cPerm = `(?:\s+([-rwxp]+))?` + + procMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceHex + hexPair + spaceDigits + cSpaceString) + briefMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceString + cSpaceAtOffset + cSpaceHex) + + // Regular expression to parse log data, of the form: + // ... file:line] msg... 
+ logInfoRE = regexp.MustCompile(`^[^\[\]]+:[0-9]+]\s`) +) + +func isSpaceOrComment(line string) bool { + trimmed := strings.TrimSpace(line) + return len(trimmed) == 0 || trimmed[0] == '#' +} + +// parseGoCount parses a Go count profile (e.g., threadcreate or +// goroutine) and returns a new Profile. +func parseGoCount(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + // Skip comments at the beginning of the file. + for s.Scan() && isSpaceOrComment(s.Text()) { + } + if err := s.Err(); err != nil { + return nil, err + } + m := countStartRE.FindStringSubmatch(s.Text()) + if m == nil { + return nil, errUnrecognized + } + profileType := m[1] + p := &Profile{ + PeriodType: &ValueType{Type: profileType, Unit: "count"}, + Period: 1, + SampleType: []*ValueType{{Type: profileType, Unit: "count"}}, + } + locations := make(map[uint64]*Location) + for s.Scan() { + line := s.Text() + if isSpaceOrComment(line) { + continue + } + if strings.HasPrefix(line, "---") { + break + } + m := countRE.FindStringSubmatch(line) + if m == nil { + return nil, errMalformed + } + n, err := strconv.ParseInt(m[1], 0, 64) + if err != nil { + return nil, errMalformed + } + fields := strings.Fields(m[2]) + locs := make([]*Location, 0, len(fields)) + for _, stk := range fields { + addr, err := strconv.ParseUint(stk, 0, 64) + if err != nil { + return nil, errMalformed + } + // Adjust all frames by -1 to land on top of the call instruction. + addr-- + loc := locations[addr] + if loc == nil { + loc = &Location{ + Address: addr, + } + locations[addr] = loc + p.Location = append(p.Location, loc) + } + locs = append(locs, loc) + } + p.Sample = append(p.Sample, &Sample{ + Location: locs, + Value: []int64{n}, + }) + } + if err := s.Err(); err != nil { + return nil, err + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + return p, nil +} + +// remapLocationIDs ensures there is a location for each address +// referenced by a sample, and remaps the samples to point to the new +// location ids. +func (p *Profile) remapLocationIDs() { + seen := make(map[*Location]bool, len(p.Location)) + var locs []*Location + + for _, s := range p.Sample { + for _, l := range s.Location { + if seen[l] { + continue + } + l.ID = uint64(len(locs) + 1) + locs = append(locs, l) + seen[l] = true + } + } + p.Location = locs +} + +func (p *Profile) remapFunctionIDs() { + seen := make(map[*Function]bool, len(p.Function)) + var fns []*Function + + for _, l := range p.Location { + for _, ln := range l.Line { + fn := ln.Function + if fn == nil || seen[fn] { + continue + } + fn.ID = uint64(len(fns) + 1) + fns = append(fns, fn) + seen[fn] = true + } + } + p.Function = fns +} + +// remapMappingIDs matches location addresses with existing mappings +// and updates them appropriately. This is O(N*M), if this ever shows +// up as a bottleneck, evaluate sorting the mappings and doing a +// binary search, which would make it O(N*log(M)). +func (p *Profile) remapMappingIDs() { + // Some profile handlers will incorrectly set regions for the main + // executable if its section is remapped. Fix them through heuristics. + + if len(p.Mapping) > 0 { + // Remove the initial mapping if named '/anon_hugepage' and has a + // consecutive adjacent mapping. 
+ if m := p.Mapping[0]; strings.HasPrefix(m.File, "/anon_hugepage") { + if len(p.Mapping) > 1 && m.Limit == p.Mapping[1].Start { + p.Mapping = p.Mapping[1:] + } + } + } + + // Subtract the offset from the start of the main mapping if it + // ends up at a recognizable start address. + if len(p.Mapping) > 0 { + const expectedStart = 0x400000 + if m := p.Mapping[0]; m.Start-m.Offset == expectedStart { + m.Start = expectedStart + m.Offset = 0 + } + } + + // Associate each location with an address to the corresponding + // mapping. Create fake mapping if a suitable one isn't found. + var fake *Mapping +nextLocation: + for _, l := range p.Location { + a := l.Address + if l.Mapping != nil || a == 0 { + continue + } + for _, m := range p.Mapping { + if m.Start <= a && a < m.Limit { + l.Mapping = m + continue nextLocation + } + } + // Work around legacy handlers failing to encode the first + // part of mappings split into adjacent ranges. + for _, m := range p.Mapping { + if m.Offset != 0 && m.Start-m.Offset <= a && a < m.Start { + m.Start -= m.Offset + m.Offset = 0 + l.Mapping = m + continue nextLocation + } + } + // If there is still no mapping, create a fake one. + // This is important for the Go legacy handler, which produced + // no mappings. + if fake == nil { + fake = &Mapping{ + ID: 1, + Limit: ^uint64(0), + } + p.Mapping = append(p.Mapping, fake) + } + l.Mapping = fake + } + + // Reset all mapping IDs. + for i, m := range p.Mapping { + m.ID = uint64(i + 1) + } +} + +var cpuInts = []func([]byte) (uint64, []byte){ + get32l, + get32b, + get64l, + get64b, +} + +func get32l(b []byte) (uint64, []byte) { + if len(b) < 4 { + return 0, nil + } + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24, b[4:] +} + +func get32b(b []byte) (uint64, []byte) { + if len(b) < 4 { + return 0, nil + } + return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24, b[4:] +} + +func get64l(b []byte) (uint64, []byte) { + if len(b) < 8 { + return 0, nil + } + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56, b[8:] +} + +func get64b(b []byte) (uint64, []byte) { + if len(b) < 8 { + return 0, nil + } + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56, b[8:] +} + +// parseCPU parses a profilez legacy profile and returns a newly +// populated Profile. +// +// The general format for profilez samples is a sequence of words in +// binary format. The first words are a header with the following data: +// +// 1st word -- 0 +// 2nd word -- 3 +// 3rd word -- 0 if a c++ application, 1 if a java application. +// 4th word -- Sampling period (in microseconds). +// 5th word -- Padding. +func parseCPU(b []byte) (*Profile, error) { + var parse func([]byte) (uint64, []byte) + var n1, n2, n3, n4, n5 uint64 + for _, parse = range cpuInts { + var tmp []byte + n1, tmp = parse(b) + n2, tmp = parse(tmp) + n3, tmp = parse(tmp) + n4, tmp = parse(tmp) + n5, tmp = parse(tmp) + + if tmp != nil && n1 == 0 && n2 == 3 && n3 == 0 && n4 > 0 && n5 == 0 { + b = tmp + return cpuProfile(b, int64(n4), parse) + } + if tmp != nil && n1 == 0 && n2 == 3 && n3 == 1 && n4 > 0 && n5 == 0 { + b = tmp + return javaCPUProfile(b, int64(n4), parse) + } + } + return nil, errUnrecognized +} + +// cpuProfile returns a new Profile from C++ profilez data. 
+// b is the profile bytes after the header, period is the profiling +// period, and parse is a function to parse 8-byte chunks from the +// profile in its native endianness. +func cpuProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) { + p := &Profile{ + Period: period * 1000, + PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"}, + SampleType: []*ValueType{ + {Type: "samples", Unit: "count"}, + {Type: "cpu", Unit: "nanoseconds"}, + }, + } + var err error + if b, _, err = parseCPUSamples(b, parse, true, p); err != nil { + return nil, err + } + + // If *most* samples have the same second-to-the-bottom frame, it + // strongly suggests that it is an uninteresting artifact of + // measurement -- a stack frame pushed by the signal handler. The + // bottom frame is always correct as it is picked up from the signal + // structure, not the stack. Check if this is the case and if so, + // remove. + + // Remove up to two frames. + maxiter := 2 + // Allow one different sample for this many samples with the same + // second-to-last frame. + similarSamples := 32 + margin := len(p.Sample) / similarSamples + + for iter := 0; iter < maxiter; iter++ { + addr1 := make(map[uint64]int) + for _, s := range p.Sample { + if len(s.Location) > 1 { + a := s.Location[1].Address + addr1[a] = addr1[a] + 1 + } + } + + for id1, count := range addr1 { + if count >= len(p.Sample)-margin { + // Found uninteresting frame, strip it out from all samples + for _, s := range p.Sample { + if len(s.Location) > 1 && s.Location[1].Address == id1 { + s.Location = append(s.Location[:1], s.Location[2:]...) + } + } + break + } + } + } + + if err := p.ParseMemoryMap(bytes.NewBuffer(b)); err != nil { + return nil, err + } + + cleanupDuplicateLocations(p) + return p, nil +} + +func cleanupDuplicateLocations(p *Profile) { + // The profile handler may duplicate the leaf frame, because it gets + // its address both from stack unwinding and from the signal + // context. Detect this and delete the duplicate, which has been + // adjusted by -1. The leaf address should not be adjusted as it is + // not a call. + for _, s := range p.Sample { + if len(s.Location) > 1 && s.Location[0].Address == s.Location[1].Address+1 { + s.Location = append(s.Location[:1], s.Location[2:]...) + } + } +} + +// parseCPUSamples parses a collection of profilez samples from a +// profile. +// +// profilez samples are a repeated sequence of stack frames of the +// form: +// +// 1st word -- The number of times this stack was encountered. +// 2nd word -- The size of the stack (StackSize). +// 3rd word -- The first address on the stack. +// ... +// StackSize + 2 -- The last address on the stack +// +// The last stack trace is of the form: +// +// 1st word -- 0 +// 2nd word -- 1 +// 3rd word -- 0 +// +// Addresses from stack traces may point to the next instruction after +// each call. Optionally adjust by -1 to land somewhere on the actual +// call (except for the leaf, which is not a call). 
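+//
+// For example, a stream of words encoding a single stack observed 5
+// times at PCs 0x1001 and 0x2002, followed by the end-of-data marker,
+// would read:
+//
+//	5, 2, 0x1001, 0x2002,  // count, StackSize, addresses...
+//	0, 1, 0                // end-of-data marker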
+func parseCPUSamples(b []byte, parse func(b []byte) (uint64, []byte), adjust bool, p *Profile) ([]byte, map[uint64]*Location, error) { + locs := make(map[uint64]*Location) + for len(b) > 0 { + var count, nstk uint64 + count, b = parse(b) + nstk, b = parse(b) + if b == nil || nstk > uint64(len(b)/4) { + return nil, nil, errUnrecognized + } + var sloc []*Location + addrs := make([]uint64, nstk) + for i := 0; i < int(nstk); i++ { + addrs[i], b = parse(b) + } + + if count == 0 && nstk == 1 && addrs[0] == 0 { + // End of data marker + break + } + for i, addr := range addrs { + if adjust && i > 0 { + addr-- + } + loc := locs[addr] + if loc == nil { + loc = &Location{ + Address: addr, + } + locs[addr] = loc + p.Location = append(p.Location, loc) + } + sloc = append(sloc, loc) + } + p.Sample = append(p.Sample, + &Sample{ + Value: []int64{int64(count), int64(count) * p.Period}, + Location: sloc, + }) + } + // Reached the end without finding the EOD marker. + return b, locs, nil +} + +// parseHeap parses a heapz legacy or a growthz profile and +// returns a newly populated Profile. +func parseHeap(b []byte) (p *Profile, err error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + if !s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + return nil, errUnrecognized + } + p = &Profile{} + + sampling := "" + hasAlloc := false + + line := s.Text() + p.PeriodType = &ValueType{Type: "space", Unit: "bytes"} + if header := heapHeaderRE.FindStringSubmatch(line); header != nil { + sampling, p.Period, hasAlloc, err = parseHeapHeader(line) + if err != nil { + return nil, err + } + } else if header = growthHeaderRE.FindStringSubmatch(line); header != nil { + p.Period = 1 + } else if header = fragmentationHeaderRE.FindStringSubmatch(line); header != nil { + p.Period = 1 + } else { + return nil, errUnrecognized + } + + if hasAlloc { + // Put alloc before inuse so that default pprof selection + // will prefer inuse_space. + p.SampleType = []*ValueType{ + {Type: "alloc_objects", Unit: "count"}, + {Type: "alloc_space", Unit: "bytes"}, + {Type: "inuse_objects", Unit: "count"}, + {Type: "inuse_space", Unit: "bytes"}, + } + } else { + p.SampleType = []*ValueType{ + {Type: "objects", Unit: "count"}, + {Type: "space", Unit: "bytes"}, + } + } + + locs := make(map[uint64]*Location) + for s.Scan() { + line := strings.TrimSpace(s.Text()) + + if isSpaceOrComment(line) { + continue + } + + if isMemoryMapSentinel(line) { + break + } + + value, blocksize, addrs, err := parseHeapSample(line, p.Period, sampling, hasAlloc) + if err != nil { + return nil, err + } + + var sloc []*Location + for _, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call. 
+ addr-- + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + + p.Sample = append(p.Sample, &Sample{ + Value: value, + Location: sloc, + NumLabel: map[string][]int64{"bytes": {blocksize}}, + }) + } + if err := s.Err(); err != nil { + return nil, err + } + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + return p, nil +} + +func parseHeapHeader(line string) (sampling string, period int64, hasAlloc bool, err error) { + header := heapHeaderRE.FindStringSubmatch(line) + if header == nil { + return "", 0, false, errUnrecognized + } + + if len(header[6]) > 0 { + if period, err = strconv.ParseInt(header[6], 10, 64); err != nil { + return "", 0, false, errUnrecognized + } + } + + if (header[3] != header[1] && header[3] != "0") || (header[4] != header[2] && header[4] != "0") { + hasAlloc = true + } + + switch header[5] { + case "heapz_v2", "heap_v2": + return "v2", period, hasAlloc, nil + case "heapprofile": + return "", 1, hasAlloc, nil + case "heap": + return "v2", period / 2, hasAlloc, nil + default: + return "", 0, false, errUnrecognized + } +} + +// parseHeapSample parses a single row from a heap profile into a new Sample. +func parseHeapSample(line string, rate int64, sampling string, includeAlloc bool) (value []int64, blocksize int64, addrs []uint64, err error) { + sampleData := heapSampleRE.FindStringSubmatch(line) + if len(sampleData) != 6 { + return nil, 0, nil, fmt.Errorf("unexpected number of sample values: got %d, want 6", len(sampleData)) + } + + // This is a local-scoped helper function to avoid needing to pass + // around rate, sampling and many return parameters. + addValues := func(countString, sizeString string, label string) error { + count, err := strconv.ParseInt(countString, 10, 64) + if err != nil { + return fmt.Errorf("malformed sample: %s: %v", line, err) + } + size, err := strconv.ParseInt(sizeString, 10, 64) + if err != nil { + return fmt.Errorf("malformed sample: %s: %v", line, err) + } + if count == 0 && size != 0 { + return fmt.Errorf("%s count was 0 but %s bytes was %d", label, label, size) + } + if count != 0 { + blocksize = size / count + if sampling == "v2" { + count, size = scaleHeapSample(count, size, rate) + } + } + value = append(value, count, size) + return nil + } + + if includeAlloc { + if err := addValues(sampleData[3], sampleData[4], "allocation"); err != nil { + return nil, 0, nil, err + } + } + + if err := addValues(sampleData[1], sampleData[2], "inuse"); err != nil { + return nil, 0, nil, err + } + + addrs, err = parseHexAddresses(sampleData[5]) + if err != nil { + return nil, 0, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + return value, blocksize, addrs, nil +} + +// parseHexAddresses extracts hex numbers from a string, attempts to convert +// each to an unsigned 64-bit number and returns the resulting numbers as a +// slice, or an error if the string contains hex numbers which are too large to +// handle (which means a malformed profile). 
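+//
+// For example (an illustrative sketch, assuming the usual lowercase
+// 0x-prefixed form matched by hexNumberRE):
+//
+//	parseHexAddresses("@ 0x400123 0x400456")
+//	// returns []uint64{0x400123, 0x400456}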
+func parseHexAddresses(s string) ([]uint64, error) { + hexStrings := hexNumberRE.FindAllString(s, -1) + var addrs []uint64 + for _, s := range hexStrings { + if addr, err := strconv.ParseUint(s, 0, 64); err == nil { + addrs = append(addrs, addr) + } else { + return nil, fmt.Errorf("failed to parse as hex 64-bit number: %s", s) + } + } + return addrs, nil +} + +// scaleHeapSample adjusts the data from a heapz Sample to +// account for its probability of appearing in the collected +// data. heapz profiles are a sampling of the memory allocations +// requests in a program. We estimate the unsampled value by dividing +// each collected sample by its probability of appearing in the +// profile. heapz v2 profiles rely on a poisson process to determine +// which samples to collect, based on the desired average collection +// rate R. The probability of a sample of size S to appear in that +// profile is 1-exp(-S/R). +func scaleHeapSample(count, size, rate int64) (int64, int64) { + if count == 0 || size == 0 { + return 0, 0 + } + + if rate <= 1 { + // if rate==1 all samples were collected so no adjustment is needed. + // if rate<1 treat as unknown and skip scaling. + return count, size + } + + avgSize := float64(size) / float64(count) + scale := 1 / (1 - math.Exp(-avgSize/float64(rate))) + + return int64(float64(count) * scale), int64(float64(size) * scale) +} + +// parseContention parses a mutex or contention profile. There are 2 cases: +// "--- contentionz " for legacy C++ profiles (and backwards compatibility) +// "--- mutex:" or "--- contention:" for profiles generated by the Go runtime. +func parseContention(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + if !s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + return nil, errUnrecognized + } + + switch l := s.Text(); { + case strings.HasPrefix(l, "--- contentionz "): + case strings.HasPrefix(l, "--- mutex:"): + case strings.HasPrefix(l, "--- contention:"): + default: + return nil, errUnrecognized + } + + p := &Profile{ + PeriodType: &ValueType{Type: "contentions", Unit: "count"}, + Period: 1, + SampleType: []*ValueType{ + {Type: "contentions", Unit: "count"}, + {Type: "delay", Unit: "nanoseconds"}, + }, + } + + var cpuHz int64 + // Parse text of the form "attribute = value" before the samples. + const delimiter = "=" + for s.Scan() { + line := s.Text() + if line = strings.TrimSpace(line); isSpaceOrComment(line) { + continue + } + if strings.HasPrefix(line, "---") { + break + } + attr := strings.SplitN(line, delimiter, 2) + if len(attr) != 2 { + break + } + key, val := strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1]) + var err error + switch key { + case "cycles/second": + if cpuHz, err = strconv.ParseInt(val, 0, 64); err != nil { + return nil, errUnrecognized + } + case "sampling period": + if p.Period, err = strconv.ParseInt(val, 0, 64); err != nil { + return nil, errUnrecognized + } + case "ms since reset": + ms, err := strconv.ParseInt(val, 0, 64) + if err != nil { + return nil, errUnrecognized + } + p.DurationNanos = ms * 1000 * 1000 + case "format": + // CPP contentionz profiles don't have format. + return nil, errUnrecognized + case "resolution": + // CPP contentionz profiles don't have resolution. 
+ return nil, errUnrecognized + case "discarded samples": + default: + return nil, errUnrecognized + } + } + if err := s.Err(); err != nil { + return nil, err + } + + locs := make(map[uint64]*Location) + for { + line := strings.TrimSpace(s.Text()) + if strings.HasPrefix(line, "---") { + break + } + if !isSpaceOrComment(line) { + value, addrs, err := parseContentionSample(line, p.Period, cpuHz) + if err != nil { + return nil, err + } + var sloc []*Location + for _, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call. + addr-- + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + p.Sample = append(p.Sample, &Sample{ + Value: value, + Location: sloc, + }) + } + if !s.Scan() { + break + } + } + if err := s.Err(); err != nil { + return nil, err + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + + return p, nil +} + +// parseContentionSample parses a single row from a contention profile +// into a new Sample. +func parseContentionSample(line string, period, cpuHz int64) (value []int64, addrs []uint64, err error) { + sampleData := contentionSampleRE.FindStringSubmatch(line) + if sampleData == nil { + return nil, nil, errUnrecognized + } + + v1, err := strconv.ParseInt(sampleData[1], 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + v2, err := strconv.ParseInt(sampleData[2], 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + // Unsample values if period and cpuHz are available. + // - Delays are scaled to cycles and then to nanoseconds. + // - Contentions are scaled to cycles. + if period > 0 { + if cpuHz > 0 { + cpuGHz := float64(cpuHz) / 1e9 + v1 = int64(float64(v1) * float64(period) / cpuGHz) + } + v2 = v2 * period + } + + value = []int64{v2, v1} + addrs, err = parseHexAddresses(sampleData[3]) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + return value, addrs, nil +} + +// parseThread parses a Threadz profile and returns a new Profile. +func parseThread(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + // Skip past comments and empty lines seeking a real header. + for s.Scan() && isSpaceOrComment(s.Text()) { + } + + line := s.Text() + if m := threadzStartRE.FindStringSubmatch(line); m != nil { + // Advance over initial comments until first stack trace. + for s.Scan() { + if line = s.Text(); isMemoryMapSentinel(line) || strings.HasPrefix(line, "-") { + break + } + } + } else if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { + return nil, errUnrecognized + } + + p := &Profile{ + SampleType: []*ValueType{{Type: "thread", Unit: "count"}}, + PeriodType: &ValueType{Type: "thread", Unit: "count"}, + Period: 1, + } + + locs := make(map[uint64]*Location) + // Recognize each thread and populate profile samples. + for !isMemoryMapSentinel(line) { + if strings.HasPrefix(line, "---- no stack trace for") { + break + } + if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { + return nil, errUnrecognized + } + + var addrs []uint64 + var err error + line, addrs, err = parseThreadSample(s) + if err != nil { + return nil, err + } + if len(addrs) == 0 { + // We got a --same as previous threads--. Bump counters. 
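+			// (Rather than appending a duplicate sample, increment the
+			// count on the most recent one.)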
+ if len(p.Sample) > 0 { + s := p.Sample[len(p.Sample)-1] + s.Value[0]++ + } + continue + } + + var sloc []*Location + for i, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call + // (except for the leaf, which is not a call). + if i > 0 { + addr-- + } + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + + p.Sample = append(p.Sample, &Sample{ + Value: []int64{1}, + Location: sloc, + }) + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + + cleanupDuplicateLocations(p) + return p, nil +} + +// parseThreadSample parses a symbolized or unsymbolized stack trace. +// Returns the first line after the traceback, the sample (or nil if +// it hits a 'same-as-previous' marker) and an error. +func parseThreadSample(s *bufio.Scanner) (nextl string, addrs []uint64, err error) { + var line string + sameAsPrevious := false + for s.Scan() { + line = strings.TrimSpace(s.Text()) + if line == "" { + continue + } + + if strings.HasPrefix(line, "---") { + break + } + if strings.Contains(line, "same as previous thread") { + sameAsPrevious = true + continue + } + + curAddrs, err := parseHexAddresses(line) + if err != nil { + return "", nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + addrs = append(addrs, curAddrs...) + } + if err := s.Err(); err != nil { + return "", nil, err + } + if sameAsPrevious { + return line, nil, nil + } + return line, addrs, nil +} + +// parseAdditionalSections parses any additional sections in the +// profile, ignoring any unrecognized sections. +func parseAdditionalSections(s *bufio.Scanner, p *Profile) error { + for !isMemoryMapSentinel(s.Text()) && s.Scan() { + } + if err := s.Err(); err != nil { + return err + } + return p.ParseMemoryMapFromScanner(s) +} + +// ParseProcMaps parses a memory map in the format of /proc/self/maps. +// ParseMemoryMap should be called after setting on a profile to +// associate locations to the corresponding mapping based on their +// address. +func ParseProcMaps(rd io.Reader) ([]*Mapping, error) { + s := bufio.NewScanner(rd) + return parseProcMapsFromScanner(s) +} + +func parseProcMapsFromScanner(s *bufio.Scanner) ([]*Mapping, error) { + var mapping []*Mapping + + var attrs []string + const delimiter = "=" + r := strings.NewReplacer() + for s.Scan() { + line := r.Replace(removeLoggingInfo(s.Text())) + m, err := parseMappingEntry(line) + if err != nil { + if err == errUnrecognized { + // Recognize assignments of the form: attr=value, and replace + // $attr with value on subsequent mappings. + if attr := strings.SplitN(line, delimiter, 2); len(attr) == 2 { + attrs = append(attrs, "$"+strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1])) + r = strings.NewReplacer(attrs...) + } + // Ignore any unrecognized entries + continue + } + return nil, err + } + if m == nil { + continue + } + mapping = append(mapping, m) + } + if err := s.Err(); err != nil { + return nil, err + } + return mapping, nil +} + +// removeLoggingInfo detects and removes log prefix entries generated +// by the glog package. If no logging prefix is detected, the string +// is returned unmodified. 
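+//
+// For example, a glog-style prefix is stripped (illustrative values):
+//
+//	removeLoggingInfo("I0210 10:11:12.000000 1234 main.go:99] 00400000-00452000 r-xp 00000000 fd:01 123 /usr/bin/app")
+//	// returns "00400000-00452000 r-xp 00000000 fd:01 123 /usr/bin/app"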
+func removeLoggingInfo(line string) string { + if match := logInfoRE.FindStringIndex(line); match != nil { + return line[match[1]:] + } + return line +} + +// ParseMemoryMap parses a memory map in the format of +// /proc/self/maps, and overrides the mappings in the current profile. +// It renumbers the samples and locations in the profile correspondingly. +func (p *Profile) ParseMemoryMap(rd io.Reader) error { + return p.ParseMemoryMapFromScanner(bufio.NewScanner(rd)) +} + +// ParseMemoryMapFromScanner parses a memory map in the format of +// /proc/self/maps or a variety of legacy format, and overrides the +// mappings in the current profile. It renumbers the samples and +// locations in the profile correspondingly. +func (p *Profile) ParseMemoryMapFromScanner(s *bufio.Scanner) error { + mapping, err := parseProcMapsFromScanner(s) + if err != nil { + return err + } + p.Mapping = append(p.Mapping, mapping...) + p.massageMappings() + p.remapLocationIDs() + p.remapFunctionIDs() + p.remapMappingIDs() + return nil +} + +func parseMappingEntry(l string) (*Mapping, error) { + var start, end, perm, file, offset, buildID string + if me := procMapsRE.FindStringSubmatch(l); len(me) == 6 { + start, end, perm, offset, file = me[1], me[2], me[3], me[4], me[5] + } else if me := briefMapsRE.FindStringSubmatch(l); len(me) == 7 { + start, end, perm, file, offset, buildID = me[1], me[2], me[3], me[4], me[5], me[6] + } else { + return nil, errUnrecognized + } + + var err error + mapping := &Mapping{ + File: file, + BuildID: buildID, + } + if perm != "" && !strings.Contains(perm, "x") { + // Skip non-executable entries. + return nil, nil + } + if mapping.Start, err = strconv.ParseUint(start, 16, 64); err != nil { + return nil, errUnrecognized + } + if mapping.Limit, err = strconv.ParseUint(end, 16, 64); err != nil { + return nil, errUnrecognized + } + if offset != "" { + if mapping.Offset, err = strconv.ParseUint(offset, 16, 64); err != nil { + return nil, errUnrecognized + } + } + return mapping, nil +} + +var memoryMapSentinels = []string{ + "--- Memory map: ---", + "MAPPED_LIBRARIES:", +} + +// isMemoryMapSentinel returns true if the string contains one of the +// known sentinels for memory map information. +func isMemoryMapSentinel(line string) bool { + for _, s := range memoryMapSentinels { + if strings.Contains(line, s) { + return true + } + } + return false +} + +func (p *Profile) addLegacyFrameInfo() { + switch { + case isProfileType(p, heapzSampleTypes): + p.DropFrames, p.KeepFrames = allocRxStr, allocSkipRxStr + case isProfileType(p, contentionzSampleTypes): + p.DropFrames, p.KeepFrames = lockRxStr, "" + default: + p.DropFrames, p.KeepFrames = cpuProfilerRxStr, "" + } +} + +var heapzSampleTypes = [][]string{ + {"allocations", "size"}, // early Go pprof profiles + {"objects", "space"}, + {"inuse_objects", "inuse_space"}, + {"alloc_objects", "alloc_space"}, + {"alloc_objects", "alloc_space", "inuse_objects", "inuse_space"}, // Go pprof legacy profiles +} +var contentionzSampleTypes = [][]string{ + {"contentions", "delay"}, +} + +func isProfileType(p *Profile, types [][]string) bool { + st := p.SampleType +nextType: + for _, t := range types { + if len(st) != len(t) { + continue + } + + for i := range st { + if st[i].Type != t[i] { + continue nextType + } + } + return true + } + return false +} + +var allocRxStr = strings.Join([]string{ + // POSIX entry points. 
+ `calloc`, + `cfree`, + `malloc`, + `free`, + `memalign`, + `do_memalign`, + `(__)?posix_memalign`, + `pvalloc`, + `valloc`, + `realloc`, + + // TC malloc. + `tcmalloc::.*`, + `tc_calloc`, + `tc_cfree`, + `tc_malloc`, + `tc_free`, + `tc_memalign`, + `tc_posix_memalign`, + `tc_pvalloc`, + `tc_valloc`, + `tc_realloc`, + `tc_new`, + `tc_delete`, + `tc_newarray`, + `tc_deletearray`, + `tc_new_nothrow`, + `tc_newarray_nothrow`, + + // Memory-allocation routines on OS X. + `malloc_zone_malloc`, + `malloc_zone_calloc`, + `malloc_zone_valloc`, + `malloc_zone_realloc`, + `malloc_zone_memalign`, + `malloc_zone_free`, + + // Go runtime + `runtime\..*`, + + // Other misc. memory allocation routines + `BaseArena::.*`, + `(::)?do_malloc_no_errno`, + `(::)?do_malloc_pages`, + `(::)?do_malloc`, + `DoSampledAllocation`, + `MallocedMemBlock::MallocedMemBlock`, + `_M_allocate`, + `__builtin_(vec_)?delete`, + `__builtin_(vec_)?new`, + `__gnu_cxx::new_allocator::allocate`, + `__libc_malloc`, + `__malloc_alloc_template::allocate`, + `allocate`, + `cpp_alloc`, + `operator new(\[\])?`, + `simple_alloc::allocate`, +}, `|`) + +var allocSkipRxStr = strings.Join([]string{ + // Preserve Go runtime frames that appear in the middle/bottom of + // the stack. + `runtime\.panic`, + `runtime\.reflectcall`, + `runtime\.call[0-9]*`, +}, `|`) + +var cpuProfilerRxStr = strings.Join([]string{ + `ProfileData::Add`, + `ProfileData::prof_handler`, + `CpuProfiler::prof_handler`, + `__pthread_sighandler`, + `__restore`, +}, `|`) + +var lockRxStr = strings.Join([]string{ + `RecordLockProfileData`, + `(base::)?RecordLockProfileData.*`, + `(base::)?SubmitMutexProfileData.*`, + `(base::)?SubmitSpinLockProfileData.*`, + `(base::Mutex::)?AwaitCommon.*`, + `(base::Mutex::)?Unlock.*`, + `(base::Mutex::)?UnlockSlow.*`, + `(base::Mutex::)?ReaderUnlock.*`, + `(base::MutexLock::)?~MutexLock.*`, + `(Mutex::)?AwaitCommon.*`, + `(Mutex::)?Unlock.*`, + `(Mutex::)?UnlockSlow.*`, + `(Mutex::)?ReaderUnlock.*`, + `(MutexLock::)?~MutexLock.*`, + `(SpinLock::)?Unlock.*`, + `(SpinLock::)?SlowUnlock.*`, + `(SpinLockHolder::)?~SpinLockHolder.*`, +}, `|`) diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go new file mode 100644 index 00000000..90c5723d --- /dev/null +++ b/vendor/github.com/google/pprof/profile/merge.go @@ -0,0 +1,568 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "encoding/binary" + "fmt" + "sort" + "strconv" + "strings" +) + +// Compact performs garbage collection on a profile to remove any +// unreferenced fields. This is useful to reduce the size of a profile +// after samples or locations have been removed. +func (p *Profile) Compact() *Profile { + p, _ = Merge([]*Profile{p}) + return p +} + +// Merge merges all the profiles in profs into a single Profile. +// Returns a new profile independent of the input profiles. 
The merged +// profile is compacted to eliminate unused samples, locations, +// functions and mappings. Profiles must have identical profile sample +// and period types or the merge will fail. profile.Period of the +// resulting profile will be the maximum of all profiles, and +// profile.TimeNanos will be the earliest nonzero one. Merges are +// associative with the caveat of the first profile having some +// specialization in how headers are combined. There may be other +// subtleties now or in the future regarding associativity. +func Merge(srcs []*Profile) (*Profile, error) { + if len(srcs) == 0 { + return nil, fmt.Errorf("no profiles to merge") + } + p, err := combineHeaders(srcs) + if err != nil { + return nil, err + } + + pm := &profileMerger{ + p: p, + samples: make(map[sampleKey]*Sample, len(srcs[0].Sample)), + locations: make(map[locationKey]*Location, len(srcs[0].Location)), + functions: make(map[functionKey]*Function, len(srcs[0].Function)), + mappings: make(map[mappingKey]*Mapping, len(srcs[0].Mapping)), + } + + for _, src := range srcs { + // Clear the profile-specific hash tables + pm.locationsByID = makeLocationIDMap(len(src.Location)) + pm.functionsByID = make(map[uint64]*Function, len(src.Function)) + pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping)) + + if len(pm.mappings) == 0 && len(src.Mapping) > 0 { + // The Mapping list has the property that the first mapping + // represents the main binary. Take the first Mapping we see, + // otherwise the operations below will add mappings in an + // arbitrary order. + pm.mapMapping(src.Mapping[0]) + } + + for _, s := range src.Sample { + if !isZeroSample(s) { + pm.mapSample(s) + } + } + } + + for _, s := range p.Sample { + if isZeroSample(s) { + // If there are any zero samples, re-merge the profile to GC + // them. + return Merge([]*Profile{p}) + } + } + + return p, nil +} + +// Normalize normalizes the source profile by multiplying each value in profile by the +// ratio of the sum of the base profile's values of that sample type to the sum of the +// source profile's value of that sample type. +func (p *Profile) Normalize(pb *Profile) error { + + if err := p.compatible(pb); err != nil { + return err + } + + baseVals := make([]int64, len(p.SampleType)) + for _, s := range pb.Sample { + for i, v := range s.Value { + baseVals[i] += v + } + } + + srcVals := make([]int64, len(p.SampleType)) + for _, s := range p.Sample { + for i, v := range s.Value { + srcVals[i] += v + } + } + + normScale := make([]float64, len(baseVals)) + for i := range baseVals { + if srcVals[i] == 0 { + normScale[i] = 0.0 + } else { + normScale[i] = float64(baseVals[i]) / float64(srcVals[i]) + } + } + p.ScaleN(normScale) + return nil +} + +func isZeroSample(s *Sample) bool { + for _, v := range s.Value { + if v != 0 { + return false + } + } + return true +} + +type profileMerger struct { + p *Profile + + // Memoization tables within a profile. + locationsByID locationIDMap + functionsByID map[uint64]*Function + mappingsByID map[uint64]mapInfo + + // Memoization tables for profile entities. + samples map[sampleKey]*Sample + locations map[locationKey]*Location + functions map[functionKey]*Function + mappings map[mappingKey]*Mapping +} + +type mapInfo struct { + m *Mapping + offset int64 +} + +func (pm *profileMerger) mapSample(src *Sample) *Sample { + // Check memoization table + k := pm.sampleKey(src) + if ss, ok := pm.samples[k]; ok { + for i, v := range src.Value { + ss.Value[i] += v + } + return ss + } + + // Make new sample. 
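+	// (The value slice and label maps are copied below so the merged
+	// sample shares no mutable state with its source profiles; locations
+	// are remapped into the merged profile rather than copied.)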
+ s := &Sample{ + Location: make([]*Location, len(src.Location)), + Value: make([]int64, len(src.Value)), + Label: make(map[string][]string, len(src.Label)), + NumLabel: make(map[string][]int64, len(src.NumLabel)), + NumUnit: make(map[string][]string, len(src.NumLabel)), + } + for i, l := range src.Location { + s.Location[i] = pm.mapLocation(l) + } + for k, v := range src.Label { + vv := make([]string, len(v)) + copy(vv, v) + s.Label[k] = vv + } + for k, v := range src.NumLabel { + u := src.NumUnit[k] + vv := make([]int64, len(v)) + uu := make([]string, len(u)) + copy(vv, v) + copy(uu, u) + s.NumLabel[k] = vv + s.NumUnit[k] = uu + } + copy(s.Value, src.Value) + pm.samples[k] = s + pm.p.Sample = append(pm.p.Sample, s) + return s +} + +func (pm *profileMerger) sampleKey(sample *Sample) sampleKey { + // Accumulate contents into a string. + var buf strings.Builder + buf.Grow(64) // Heuristic to avoid extra allocs + + // encode a number + putNumber := func(v uint64) { + var num [binary.MaxVarintLen64]byte + n := binary.PutUvarint(num[:], v) + buf.Write(num[:n]) + } + + // encode a string prefixed with its length. + putDelimitedString := func(s string) { + putNumber(uint64(len(s))) + buf.WriteString(s) + } + + for _, l := range sample.Location { + // Get the location in the merged profile, which may have a different ID. + if loc := pm.mapLocation(l); loc != nil { + putNumber(loc.ID) + } + } + putNumber(0) // Delimiter + + for _, l := range sortedKeys1(sample.Label) { + putDelimitedString(l) + values := sample.Label[l] + putNumber(uint64(len(values))) + for _, v := range values { + putDelimitedString(v) + } + } + + for _, l := range sortedKeys2(sample.NumLabel) { + putDelimitedString(l) + values := sample.NumLabel[l] + putNumber(uint64(len(values))) + for _, v := range values { + putNumber(uint64(v)) + } + units := sample.NumUnit[l] + putNumber(uint64(len(units))) + for _, v := range units { + putDelimitedString(v) + } + } + + return sampleKey(buf.String()) +} + +type sampleKey string + +// sortedKeys1 returns the sorted keys found in a string->[]string map. +// +// Note: this is currently non-generic since github pprof runs golint, +// which does not support generics. When that issue is fixed, it can +// be merged with sortedKeys2 and made into a generic function. +func sortedKeys1(m map[string][]string) []string { + if len(m) == 0 { + return nil + } + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// sortedKeys2 returns the sorted keys found in a string->[]int64 map. +// +// Note: this is currently non-generic since github pprof runs golint, +// which does not support generics. When that issue is fixed, it can +// be merged with sortedKeys1 and made into a generic function. +func sortedKeys2(m map[string][]int64) []string { + if len(m) == 0 { + return nil + } + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +func (pm *profileMerger) mapLocation(src *Location) *Location { + if src == nil { + return nil + } + + if l := pm.locationsByID.get(src.ID); l != nil { + return l + } + + mi := pm.mapMapping(src.Mapping) + l := &Location{ + ID: uint64(len(pm.p.Location) + 1), + Mapping: mi.m, + Address: uint64(int64(src.Address) + mi.offset), + Line: make([]Line, len(src.Line)), + IsFolded: src.IsFolded, + } + for i, ln := range src.Line { + l.Line[i] = pm.mapLine(ln) + } + // Check memoization table. 
Must be done on the remapped location to + // account for the remapped mapping ID. + k := l.key() + if ll, ok := pm.locations[k]; ok { + pm.locationsByID.set(src.ID, ll) + return ll + } + pm.locationsByID.set(src.ID, l) + pm.locations[k] = l + pm.p.Location = append(pm.p.Location, l) + return l +} + +// key generates locationKey to be used as a key for maps. +func (l *Location) key() locationKey { + key := locationKey{ + addr: l.Address, + isFolded: l.IsFolded, + } + if l.Mapping != nil { + // Normalizes address to handle address space randomization. + key.addr -= l.Mapping.Start + key.mappingID = l.Mapping.ID + } + lines := make([]string, len(l.Line)*2) + for i, line := range l.Line { + if line.Function != nil { + lines[i*2] = strconv.FormatUint(line.Function.ID, 16) + } + lines[i*2+1] = strconv.FormatInt(line.Line, 16) + } + key.lines = strings.Join(lines, "|") + return key +} + +type locationKey struct { + addr, mappingID uint64 + lines string + isFolded bool +} + +func (pm *profileMerger) mapMapping(src *Mapping) mapInfo { + if src == nil { + return mapInfo{} + } + + if mi, ok := pm.mappingsByID[src.ID]; ok { + return mi + } + + // Check memoization tables. + mk := src.key() + if m, ok := pm.mappings[mk]; ok { + mi := mapInfo{m, int64(m.Start) - int64(src.Start)} + pm.mappingsByID[src.ID] = mi + return mi + } + m := &Mapping{ + ID: uint64(len(pm.p.Mapping) + 1), + Start: src.Start, + Limit: src.Limit, + Offset: src.Offset, + File: src.File, + KernelRelocationSymbol: src.KernelRelocationSymbol, + BuildID: src.BuildID, + HasFunctions: src.HasFunctions, + HasFilenames: src.HasFilenames, + HasLineNumbers: src.HasLineNumbers, + HasInlineFrames: src.HasInlineFrames, + } + pm.p.Mapping = append(pm.p.Mapping, m) + + // Update memoization tables. + pm.mappings[mk] = m + mi := mapInfo{m, 0} + pm.mappingsByID[src.ID] = mi + return mi +} + +// key generates encoded strings of Mapping to be used as a key for +// maps. +func (m *Mapping) key() mappingKey { + // Normalize addresses to handle address space randomization. + // Round up to next 4K boundary to avoid minor discrepancies. + const mapsizeRounding = 0x1000 + + size := m.Limit - m.Start + size = size + mapsizeRounding - 1 + size = size - (size % mapsizeRounding) + key := mappingKey{ + size: size, + offset: m.Offset, + } + + switch { + case m.BuildID != "": + key.buildIDOrFile = m.BuildID + case m.File != "": + key.buildIDOrFile = m.File + default: + // A mapping containing neither build ID nor file name is a fake mapping. A + // key with empty buildIDOrFile is used for fake mappings so that they are + // treated as the same mapping during merging. + } + return key +} + +type mappingKey struct { + size, offset uint64 + buildIDOrFile string +} + +func (pm *profileMerger) mapLine(src Line) Line { + ln := Line{ + Function: pm.mapFunction(src.Function), + Line: src.Line, + } + return ln +} + +func (pm *profileMerger) mapFunction(src *Function) *Function { + if src == nil { + return nil + } + if f, ok := pm.functionsByID[src.ID]; ok { + return f + } + k := src.key() + if f, ok := pm.functions[k]; ok { + pm.functionsByID[src.ID] = f + return f + } + f := &Function{ + ID: uint64(len(pm.p.Function) + 1), + Name: src.Name, + SystemName: src.SystemName, + Filename: src.Filename, + StartLine: src.StartLine, + } + pm.functions[k] = f + pm.functionsByID[src.ID] = f + pm.p.Function = append(pm.p.Function, f) + return f +} + +// key generates a struct to be used as a key for maps. 
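+// Functions that agree on start line, name, system name and file name
+// collapse to the same key and are deduplicated when profiles merge.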
+func (f *Function) key() functionKey { + return functionKey{ + f.StartLine, + f.Name, + f.SystemName, + f.Filename, + } +} + +type functionKey struct { + startLine int64 + name, systemName, fileName string +} + +// combineHeaders checks that all profiles can be merged and returns +// their combined profile. +func combineHeaders(srcs []*Profile) (*Profile, error) { + for _, s := range srcs[1:] { + if err := srcs[0].compatible(s); err != nil { + return nil, err + } + } + + var timeNanos, durationNanos, period int64 + var comments []string + seenComments := map[string]bool{} + var defaultSampleType string + for _, s := range srcs { + if timeNanos == 0 || s.TimeNanos < timeNanos { + timeNanos = s.TimeNanos + } + durationNanos += s.DurationNanos + if period == 0 || period < s.Period { + period = s.Period + } + for _, c := range s.Comments { + if seen := seenComments[c]; !seen { + comments = append(comments, c) + seenComments[c] = true + } + } + if defaultSampleType == "" { + defaultSampleType = s.DefaultSampleType + } + } + + p := &Profile{ + SampleType: make([]*ValueType, len(srcs[0].SampleType)), + + DropFrames: srcs[0].DropFrames, + KeepFrames: srcs[0].KeepFrames, + + TimeNanos: timeNanos, + DurationNanos: durationNanos, + PeriodType: srcs[0].PeriodType, + Period: period, + + Comments: comments, + DefaultSampleType: defaultSampleType, + } + copy(p.SampleType, srcs[0].SampleType) + return p, nil +} + +// compatible determines if two profiles can be compared/merged. +// returns nil if the profiles are compatible; otherwise an error with +// details on the incompatibility. +func (p *Profile) compatible(pb *Profile) error { + if !equalValueType(p.PeriodType, pb.PeriodType) { + return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType) + } + + if len(p.SampleType) != len(pb.SampleType) { + return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType) + } + + for i := range p.SampleType { + if !equalValueType(p.SampleType[i], pb.SampleType[i]) { + return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType) + } + } + return nil +} + +// equalValueType returns true if the two value types are semantically +// equal. It ignores the internal fields used during encode/decode. +func equalValueType(st1, st2 *ValueType) bool { + return st1.Type == st2.Type && st1.Unit == st2.Unit +} + +// locationIDMap is like a map[uint64]*Location, but provides efficiency for +// ids that are densely numbered, which is often the case. +type locationIDMap struct { + dense []*Location // indexed by id for id < len(dense) + sparse map[uint64]*Location // indexed by id for id >= len(dense) +} + +func makeLocationIDMap(n int) locationIDMap { + return locationIDMap{ + dense: make([]*Location, n), + sparse: map[uint64]*Location{}, + } +} + +func (lm locationIDMap) get(id uint64) *Location { + if id < uint64(len(lm.dense)) { + return lm.dense[int(id)] + } + return lm.sparse[id] +} + +func (lm locationIDMap) set(id uint64, loc *Location) { + if id < uint64(len(lm.dense)) { + lm.dense[id] = loc + return + } + lm.sparse[id] = loc +} diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go new file mode 100644 index 00000000..4ec00fe7 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/profile.go @@ -0,0 +1,813 @@ +// Copyright 2014 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package profile provides a representation of profile.proto and +// methods to encode/decode profiles in this format. +package profile + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "math" + "path/filepath" + "regexp" + "sort" + "strings" + "sync" + "time" +) + +// Profile is an in-memory representation of profile.proto. +type Profile struct { + SampleType []*ValueType + DefaultSampleType string + Sample []*Sample + Mapping []*Mapping + Location []*Location + Function []*Function + Comments []string + + DropFrames string + KeepFrames string + + TimeNanos int64 + DurationNanos int64 + PeriodType *ValueType + Period int64 + + // The following fields are modified during encoding and copying, + // so are protected by a Mutex. + encodeMu sync.Mutex + + commentX []int64 + dropFramesX int64 + keepFramesX int64 + stringTable []string + defaultSampleTypeX int64 +} + +// ValueType corresponds to Profile.ValueType +type ValueType struct { + Type string // cpu, wall, inuse_space, etc + Unit string // seconds, nanoseconds, bytes, etc + + typeX int64 + unitX int64 +} + +// Sample corresponds to Profile.Sample +type Sample struct { + Location []*Location + Value []int64 + Label map[string][]string + NumLabel map[string][]int64 + NumUnit map[string][]string + + locationIDX []uint64 + labelX []label +} + +// label corresponds to Profile.Label +type label struct { + keyX int64 + // Exactly one of the two following values must be set + strX int64 + numX int64 // Integer value for this label + // can be set if numX has value + unitX int64 +} + +// Mapping corresponds to Profile.Mapping +type Mapping struct { + ID uint64 + Start uint64 + Limit uint64 + Offset uint64 + File string + BuildID string + HasFunctions bool + HasFilenames bool + HasLineNumbers bool + HasInlineFrames bool + + fileX int64 + buildIDX int64 + + // Name of the kernel relocation symbol ("_text" or "_stext"), extracted from File. + // For linux kernel mappings generated by some tools, correct symbolization depends + // on knowing which of the two possible relocation symbols was used for `Start`. + // This is given to us as a suffix in `File` (e.g. "[kernel.kallsyms]_stext"). + // + // Note, this public field is not persisted in the proto. For the purposes of + // copying / merging / hashing profiles, it is considered subsumed by `File`. + KernelRelocationSymbol string +} + +// Location corresponds to Profile.Location +type Location struct { + ID uint64 + Mapping *Mapping + Address uint64 + Line []Line + IsFolded bool + + mappingIDX uint64 +} + +// Line corresponds to Profile.Line +type Line struct { + Function *Function + Line int64 + + functionIDX uint64 +} + +// Function corresponds to Profile.Function +type Function struct { + ID uint64 + Name string + SystemName string + Filename string + StartLine int64 + + nameX int64 + systemNameX int64 + filenameX int64 +} + +// Parse parses a profile and checks for its validity. 
The input +// may be a gzip-compressed encoded protobuf or one of many legacy +// profile formats which may be unsupported in the future. +func Parse(r io.Reader) (*Profile, error) { + data, err := io.ReadAll(r) + if err != nil { + return nil, err + } + return ParseData(data) +} + +// ParseData parses a profile from a buffer and checks for its +// validity. +func ParseData(data []byte) (*Profile, error) { + var p *Profile + var err error + if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err == nil { + data, err = io.ReadAll(gz) + } + if err != nil { + return nil, fmt.Errorf("decompressing profile: %v", err) + } + } + if p, err = ParseUncompressed(data); err != nil && err != errNoData && err != errConcatProfile { + p, err = parseLegacy(data) + } + + if err != nil { + return nil, fmt.Errorf("parsing profile: %v", err) + } + + if err := p.CheckValid(); err != nil { + return nil, fmt.Errorf("malformed profile: %v", err) + } + return p, nil +} + +var errUnrecognized = fmt.Errorf("unrecognized profile format") +var errMalformed = fmt.Errorf("malformed profile format") +var errNoData = fmt.Errorf("empty input file") +var errConcatProfile = fmt.Errorf("concatenated profiles detected") + +func parseLegacy(data []byte) (*Profile, error) { + parsers := []func([]byte) (*Profile, error){ + parseCPU, + parseHeap, + parseGoCount, // goroutine, threadcreate + parseThread, + parseContention, + parseJavaProfile, + } + + for _, parser := range parsers { + p, err := parser(data) + if err == nil { + p.addLegacyFrameInfo() + return p, nil + } + if err != errUnrecognized { + return nil, err + } + } + return nil, errUnrecognized +} + +// ParseUncompressed parses an uncompressed protobuf into a profile. +func ParseUncompressed(data []byte) (*Profile, error) { + if len(data) == 0 { + return nil, errNoData + } + p := &Profile{} + if err := unmarshal(data, p); err != nil { + return nil, err + } + + if err := p.postDecode(); err != nil { + return nil, err + } + + return p, nil +} + +var libRx = regexp.MustCompile(`([.]so$|[.]so[._][0-9]+)`) + +// massageMappings applies heuristic-based changes to the profile +// mappings to account for quirks of some environments. +func (p *Profile) massageMappings() { + // Merge adjacent regions with matching names, checking that the offsets match + if len(p.Mapping) > 1 { + mappings := []*Mapping{p.Mapping[0]} + for _, m := range p.Mapping[1:] { + lm := mappings[len(mappings)-1] + if adjacent(lm, m) { + lm.Limit = m.Limit + if m.File != "" { + lm.File = m.File + } + if m.BuildID != "" { + lm.BuildID = m.BuildID + } + p.updateLocationMapping(m, lm) + continue + } + mappings = append(mappings, m) + } + p.Mapping = mappings + } + + // Use heuristics to identify main binary and move it to the top of the list of mappings + for i, m := range p.Mapping { + file := strings.TrimSpace(strings.Replace(m.File, "(deleted)", "", -1)) + if len(file) == 0 { + continue + } + if len(libRx.FindStringSubmatch(file)) > 0 { + continue + } + if file[0] == '[' { + continue + } + // Swap what we guess is main to position 0. + p.Mapping[0], p.Mapping[i] = p.Mapping[i], p.Mapping[0] + break + } + + // Keep the mapping IDs neatly sorted + for i, m := range p.Mapping { + m.ID = uint64(i + 1) + } +} + +// adjacent returns whether two mapping entries represent the same +// mapping that has been split into two. Check that their addresses are adjacent, +// and if the offsets match, if they are available. 
+func adjacent(m1, m2 *Mapping) bool { + if m1.File != "" && m2.File != "" { + if m1.File != m2.File { + return false + } + } + if m1.BuildID != "" && m2.BuildID != "" { + if m1.BuildID != m2.BuildID { + return false + } + } + if m1.Limit != m2.Start { + return false + } + if m1.Offset != 0 && m2.Offset != 0 { + offset := m1.Offset + (m1.Limit - m1.Start) + if offset != m2.Offset { + return false + } + } + return true +} + +func (p *Profile) updateLocationMapping(from, to *Mapping) { + for _, l := range p.Location { + if l.Mapping == from { + l.Mapping = to + } + } +} + +func serialize(p *Profile) []byte { + p.encodeMu.Lock() + p.preEncode() + b := marshal(p) + p.encodeMu.Unlock() + return b +} + +// Write writes the profile as a gzip-compressed marshaled protobuf. +func (p *Profile) Write(w io.Writer) error { + zw := gzip.NewWriter(w) + defer zw.Close() + _, err := zw.Write(serialize(p)) + return err +} + +// WriteUncompressed writes the profile as a marshaled protobuf. +func (p *Profile) WriteUncompressed(w io.Writer) error { + _, err := w.Write(serialize(p)) + return err +} + +// CheckValid tests whether the profile is valid. Checks include, but are +// not limited to: +// - len(Profile.Sample[n].value) == len(Profile.value_unit) +// - Sample.id has a corresponding Profile.Location +func (p *Profile) CheckValid() error { + // Check that sample values are consistent + sampleLen := len(p.SampleType) + if sampleLen == 0 && len(p.Sample) != 0 { + return fmt.Errorf("missing sample type information") + } + for _, s := range p.Sample { + if s == nil { + return fmt.Errorf("profile has nil sample") + } + if len(s.Value) != sampleLen { + return fmt.Errorf("mismatch: sample has %d values vs. %d types", len(s.Value), len(p.SampleType)) + } + for _, l := range s.Location { + if l == nil { + return fmt.Errorf("sample has nil location") + } + } + } + + // Check that all mappings/locations/functions are in the tables + // Check that there are no duplicate ids + mappings := make(map[uint64]*Mapping, len(p.Mapping)) + for _, m := range p.Mapping { + if m == nil { + return fmt.Errorf("profile has nil mapping") + } + if m.ID == 0 { + return fmt.Errorf("found mapping with reserved ID=0") + } + if mappings[m.ID] != nil { + return fmt.Errorf("multiple mappings with same id: %d", m.ID) + } + mappings[m.ID] = m + } + functions := make(map[uint64]*Function, len(p.Function)) + for _, f := range p.Function { + if f == nil { + return fmt.Errorf("profile has nil function") + } + if f.ID == 0 { + return fmt.Errorf("found function with reserved ID=0") + } + if functions[f.ID] != nil { + return fmt.Errorf("multiple functions with same id: %d", f.ID) + } + functions[f.ID] = f + } + locations := make(map[uint64]*Location, len(p.Location)) + for _, l := range p.Location { + if l == nil { + return fmt.Errorf("profile has nil location") + } + if l.ID == 0 { + return fmt.Errorf("found location with reserved id=0") + } + if locations[l.ID] != nil { + return fmt.Errorf("multiple locations with same id: %d", l.ID) + } + locations[l.ID] = l + if m := l.Mapping; m != nil { + if m.ID == 0 || mappings[m.ID] != m { + return fmt.Errorf("inconsistent mapping %p: %d", m, m.ID) + } + } + for _, ln := range l.Line { + f := ln.Function + if f == nil { + return fmt.Errorf("location id: %d has a line with nil function", l.ID) + } + if f.ID == 0 || functions[f.ID] != f { + return fmt.Errorf("inconsistent function %p: %d", f, f.ID) + } + } + } + return nil +} + +// Aggregate merges the locations in the profile into equivalence +// classes 
preserving the request attributes. It also updates the +// samples to point to the merged locations. +func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error { + for _, m := range p.Mapping { + m.HasInlineFrames = m.HasInlineFrames && inlineFrame + m.HasFunctions = m.HasFunctions && function + m.HasFilenames = m.HasFilenames && filename + m.HasLineNumbers = m.HasLineNumbers && linenumber + } + + // Aggregate functions + if !function || !filename { + for _, f := range p.Function { + if !function { + f.Name = "" + f.SystemName = "" + } + if !filename { + f.Filename = "" + } + } + } + + // Aggregate locations + if !inlineFrame || !address || !linenumber { + for _, l := range p.Location { + if !inlineFrame && len(l.Line) > 1 { + l.Line = l.Line[len(l.Line)-1:] + } + if !linenumber { + for i := range l.Line { + l.Line[i].Line = 0 + } + } + if !address { + l.Address = 0 + } + } + } + + return p.CheckValid() +} + +// NumLabelUnits returns a map of numeric label keys to the units +// associated with those keys and a map of those keys to any units +// that were encountered but not used. +// Unit for a given key is the first encountered unit for that key. If multiple +// units are encountered for values paired with a particular key, then the first +// unit encountered is used and all other units are returned in sorted order +// in map of ignored units. +// If no units are encountered for a particular key, the unit is then inferred +// based on the key. +func (p *Profile) NumLabelUnits() (map[string]string, map[string][]string) { + numLabelUnits := map[string]string{} + ignoredUnits := map[string]map[string]bool{} + encounteredKeys := map[string]bool{} + + // Determine units based on numeric tags for each sample. + for _, s := range p.Sample { + for k := range s.NumLabel { + encounteredKeys[k] = true + for _, unit := range s.NumUnit[k] { + if unit == "" { + continue + } + if wantUnit, ok := numLabelUnits[k]; !ok { + numLabelUnits[k] = unit + } else if wantUnit != unit { + if v, ok := ignoredUnits[k]; ok { + v[unit] = true + } else { + ignoredUnits[k] = map[string]bool{unit: true} + } + } + } + } + } + // Infer units for keys without any units associated with + // numeric tag values. + for key := range encounteredKeys { + unit := numLabelUnits[key] + if unit == "" { + switch key { + case "alignment", "request": + numLabelUnits[key] = "bytes" + default: + numLabelUnits[key] = key + } + } + } + + // Copy ignored units into more readable format + unitsIgnored := make(map[string][]string, len(ignoredUnits)) + for key, values := range ignoredUnits { + units := make([]string, len(values)) + i := 0 + for unit := range values { + units[i] = unit + i++ + } + sort.Strings(units) + unitsIgnored[key] = units + } + + return numLabelUnits, unitsIgnored +} + +// String dumps a text representation of a profile. Intended mainly +// for debugging purposes. 
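+//
+// Typical debugging use (an illustrative sketch):
+//
+//	fmt.Println(p.String())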
+func (p *Profile) String() string { + ss := make([]string, 0, len(p.Comments)+len(p.Sample)+len(p.Mapping)+len(p.Location)) + for _, c := range p.Comments { + ss = append(ss, "Comment: "+c) + } + if pt := p.PeriodType; pt != nil { + ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit)) + } + ss = append(ss, fmt.Sprintf("Period: %d", p.Period)) + if p.TimeNanos != 0 { + ss = append(ss, fmt.Sprintf("Time: %v", time.Unix(0, p.TimeNanos))) + } + if p.DurationNanos != 0 { + ss = append(ss, fmt.Sprintf("Duration: %.4v", time.Duration(p.DurationNanos))) + } + + ss = append(ss, "Samples:") + var sh1 string + for _, s := range p.SampleType { + dflt := "" + if s.Type == p.DefaultSampleType { + dflt = "[dflt]" + } + sh1 = sh1 + fmt.Sprintf("%s/%s%s ", s.Type, s.Unit, dflt) + } + ss = append(ss, strings.TrimSpace(sh1)) + for _, s := range p.Sample { + ss = append(ss, s.string()) + } + + ss = append(ss, "Locations") + for _, l := range p.Location { + ss = append(ss, l.string()) + } + + ss = append(ss, "Mappings") + for _, m := range p.Mapping { + ss = append(ss, m.string()) + } + + return strings.Join(ss, "\n") + "\n" +} + +// string dumps a text representation of a mapping. Intended mainly +// for debugging purposes. +func (m *Mapping) string() string { + bits := "" + if m.HasFunctions { + bits = bits + "[FN]" + } + if m.HasFilenames { + bits = bits + "[FL]" + } + if m.HasLineNumbers { + bits = bits + "[LN]" + } + if m.HasInlineFrames { + bits = bits + "[IN]" + } + return fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s", + m.ID, + m.Start, m.Limit, m.Offset, + m.File, + m.BuildID, + bits) +} + +// string dumps a text representation of a location. Intended mainly +// for debugging purposes. +func (l *Location) string() string { + ss := []string{} + locStr := fmt.Sprintf("%6d: %#x ", l.ID, l.Address) + if m := l.Mapping; m != nil { + locStr = locStr + fmt.Sprintf("M=%d ", m.ID) + } + if l.IsFolded { + locStr = locStr + "[F] " + } + if len(l.Line) == 0 { + ss = append(ss, locStr) + } + for li := range l.Line { + lnStr := "??" + if fn := l.Line[li].Function; fn != nil { + lnStr = fmt.Sprintf("%s %s:%d s=%d", + fn.Name, + fn.Filename, + l.Line[li].Line, + fn.StartLine) + if fn.Name != fn.SystemName { + lnStr = lnStr + "(" + fn.SystemName + ")" + } + } + ss = append(ss, locStr+lnStr) + // Do not print location details past the first line + locStr = " " + } + return strings.Join(ss, "\n") +} + +// string dumps a text representation of a sample. Intended mainly +// for debugging purposes. +func (s *Sample) string() string { + ss := []string{} + var sv string + for _, v := range s.Value { + sv = fmt.Sprintf("%s %10d", sv, v) + } + sv = sv + ": " + for _, l := range s.Location { + sv = sv + fmt.Sprintf("%d ", l.ID) + } + ss = append(ss, sv) + const labelHeader = " " + if len(s.Label) > 0 { + ss = append(ss, labelHeader+labelsToString(s.Label)) + } + if len(s.NumLabel) > 0 { + ss = append(ss, labelHeader+numLabelsToString(s.NumLabel, s.NumUnit)) + } + return strings.Join(ss, "\n") +} + +// labelsToString returns a string representation of a +// map representing labels. +func labelsToString(labels map[string][]string) string { + ls := []string{} + for k, v := range labels { + ls = append(ls, fmt.Sprintf("%s:%v", k, v)) + } + sort.Strings(ls) + return strings.Join(ls, " ") +} + +// numLabelsToString returns a string representation of a map +// representing numeric labels. 
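+//
+// For example (illustrative values):
+//
+//	numLabelsToString(
+//		map[string][]int64{"bytes": {512}},
+//		map[string][]string{"bytes": {"B"}},
+//	)
+//	// returns "bytes:[512 B]"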
+func numLabelsToString(numLabels map[string][]int64, numUnits map[string][]string) string { + ls := []string{} + for k, v := range numLabels { + units := numUnits[k] + var labelString string + if len(units) == len(v) { + values := make([]string, len(v)) + for i, vv := range v { + values[i] = fmt.Sprintf("%d %s", vv, units[i]) + } + labelString = fmt.Sprintf("%s:%v", k, values) + } else { + labelString = fmt.Sprintf("%s:%v", k, v) + } + ls = append(ls, labelString) + } + sort.Strings(ls) + return strings.Join(ls, " ") +} + +// SetLabel sets the specified key to the specified value for all samples in the +// profile. +func (p *Profile) SetLabel(key string, value []string) { + for _, sample := range p.Sample { + if sample.Label == nil { + sample.Label = map[string][]string{key: value} + } else { + sample.Label[key] = value + } + } +} + +// RemoveLabel removes all labels associated with the specified key for all +// samples in the profile. +func (p *Profile) RemoveLabel(key string) { + for _, sample := range p.Sample { + delete(sample.Label, key) + } +} + +// HasLabel returns true if a sample has a label with indicated key and value. +func (s *Sample) HasLabel(key, value string) bool { + for _, v := range s.Label[key] { + if v == value { + return true + } + } + return false +} + +// DiffBaseSample returns true if a sample belongs to the diff base and false +// otherwise. +func (s *Sample) DiffBaseSample() bool { + return s.HasLabel("pprof::base", "true") +} + +// Scale multiplies all sample values in a profile by a constant and keeps +// only samples that have at least one non-zero value. +func (p *Profile) Scale(ratio float64) { + if ratio == 1 { + return + } + ratios := make([]float64, len(p.SampleType)) + for i := range p.SampleType { + ratios[i] = ratio + } + p.ScaleN(ratios) +} + +// ScaleN multiplies each sample values in a sample by a different amount +// and keeps only samples that have at least one non-zero value. +func (p *Profile) ScaleN(ratios []float64) error { + if len(p.SampleType) != len(ratios) { + return fmt.Errorf("mismatched scale ratios, got %d, want %d", len(ratios), len(p.SampleType)) + } + allOnes := true + for _, r := range ratios { + if r != 1 { + allOnes = false + break + } + } + if allOnes { + return nil + } + fillIdx := 0 + for _, s := range p.Sample { + keepSample := false + for i, v := range s.Value { + if ratios[i] != 1 { + val := int64(math.Round(float64(v) * ratios[i])) + s.Value[i] = val + keepSample = keepSample || val != 0 + } + } + if keepSample { + p.Sample[fillIdx] = s + fillIdx++ + } + } + p.Sample = p.Sample[:fillIdx] + return nil +} + +// HasFunctions determines if all locations in this profile have +// symbolized function information. +func (p *Profile) HasFunctions() bool { + for _, l := range p.Location { + if l.Mapping != nil && !l.Mapping.HasFunctions { + return false + } + } + return true +} + +// HasFileLines determines if all locations in this profile have +// symbolized file and line number information. +func (p *Profile) HasFileLines() bool { + for _, l := range p.Location { + if l.Mapping != nil && (!l.Mapping.HasFilenames || !l.Mapping.HasLineNumbers) { + return false + } + } + return true +} + +// Unsymbolizable returns true if a mapping points to a binary for which +// locations can't be symbolized in principle, at least now. Examples are +// "[vdso]", [vsyscall]" and some others, see the code. 
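+//
+// For example, "[vdso]", "[vsyscall]", "linux-vdso.so.1" and
+// "/dev/dri/card0" are unsymbolizable, while "/usr/bin/app" is not.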
+func (m *Mapping) Unsymbolizable() bool { + name := filepath.Base(m.File) + return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") +} + +// Copy makes a fully independent copy of a profile. +func (p *Profile) Copy() *Profile { + pp := &Profile{} + if err := unmarshal(serialize(p), pp); err != nil { + panic(err) + } + if err := pp.postDecode(); err != nil { + panic(err) + } + + return pp +} diff --git a/vendor/github.com/google/pprof/profile/proto.go b/vendor/github.com/google/pprof/profile/proto.go new file mode 100644 index 00000000..a15696ba --- /dev/null +++ b/vendor/github.com/google/pprof/profile/proto.go @@ -0,0 +1,367 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file is a simple protocol buffer encoder and decoder. +// The format is described at +// https://developers.google.com/protocol-buffers/docs/encoding +// +// A protocol message must implement the message interface: +// decoder() []decoder +// encode(*buffer) +// +// The decode method returns a slice indexed by field number that gives the +// function to decode that field. +// The encode method encodes its receiver into the given buffer. +// +// The two methods are simple enough to be implemented by hand rather than +// by using a protocol compiler. +// +// See profile.go for examples of messages implementing this interface. +// +// There is no support for groups, message sets, or "has" bits. + +package profile + +import ( + "errors" + "fmt" +) + +type buffer struct { + field int // field tag + typ int // proto wire type code for field + u64 uint64 + data []byte + tmp [16]byte + tmpLines []Line // temporary storage used while decoding "repeated Line". 
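+	// u64 holds the value of varint and fixed-width fields, while data holds
+	// the payload of length-delimited fields; tmp is scratch space used to
+	// rotate a length prefix in front of already-encoded bytes.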
+} + +type decoder func(*buffer, message) error + +type message interface { + decoder() []decoder + encode(*buffer) +} + +func marshal(m message) []byte { + var b buffer + m.encode(&b) + return b.data +} + +func encodeVarint(b *buffer, x uint64) { + for x >= 128 { + b.data = append(b.data, byte(x)|0x80) + x >>= 7 + } + b.data = append(b.data, byte(x)) +} + +func encodeLength(b *buffer, tag int, len int) { + encodeVarint(b, uint64(tag)<<3|2) + encodeVarint(b, uint64(len)) +} + +func encodeUint64(b *buffer, tag int, x uint64) { + // append varint to b.data + encodeVarint(b, uint64(tag)<<3) + encodeVarint(b, x) +} + +func encodeUint64s(b *buffer, tag int, x []uint64) { + if len(x) > 2 { + // Use packed encoding + n1 := len(b.data) + for _, u := range x { + encodeVarint(b, u) + } + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) + return + } + for _, u := range x { + encodeUint64(b, tag, u) + } +} + +func encodeUint64Opt(b *buffer, tag int, x uint64) { + if x == 0 { + return + } + encodeUint64(b, tag, x) +} + +func encodeInt64(b *buffer, tag int, x int64) { + u := uint64(x) + encodeUint64(b, tag, u) +} + +func encodeInt64s(b *buffer, tag int, x []int64) { + if len(x) > 2 { + // Use packed encoding + n1 := len(b.data) + for _, u := range x { + encodeVarint(b, uint64(u)) + } + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) + return + } + for _, u := range x { + encodeInt64(b, tag, u) + } +} + +func encodeInt64Opt(b *buffer, tag int, x int64) { + if x == 0 { + return + } + encodeInt64(b, tag, x) +} + +func encodeString(b *buffer, tag int, x string) { + encodeLength(b, tag, len(x)) + b.data = append(b.data, x...) 
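+	// No terminator follows: protobuf strings are delimited by the length
+	// header written above.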
+} + +func encodeStrings(b *buffer, tag int, x []string) { + for _, s := range x { + encodeString(b, tag, s) + } +} + +func encodeBool(b *buffer, tag int, x bool) { + if x { + encodeUint64(b, tag, 1) + } else { + encodeUint64(b, tag, 0) + } +} + +func encodeBoolOpt(b *buffer, tag int, x bool) { + if x { + encodeBool(b, tag, x) + } +} + +func encodeMessage(b *buffer, tag int, m message) { + n1 := len(b.data) + m.encode(b) + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) +} + +func unmarshal(data []byte, m message) (err error) { + b := buffer{data: data, typ: 2} + return decodeMessage(&b, m) +} + +func le64(p []byte) uint64 { + return uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56 +} + +func le32(p []byte) uint32 { + return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 +} + +func decodeVarint(data []byte) (uint64, []byte, error) { + var u uint64 + for i := 0; ; i++ { + if i >= 10 || i >= len(data) { + return 0, nil, errors.New("bad varint") + } + u |= uint64(data[i]&0x7F) << uint(7*i) + if data[i]&0x80 == 0 { + return u, data[i+1:], nil + } + } +} + +func decodeField(b *buffer, data []byte) ([]byte, error) { + x, data, err := decodeVarint(data) + if err != nil { + return nil, err + } + b.field = int(x >> 3) + b.typ = int(x & 7) + b.data = nil + b.u64 = 0 + switch b.typ { + case 0: + b.u64, data, err = decodeVarint(data) + if err != nil { + return nil, err + } + case 1: + if len(data) < 8 { + return nil, errors.New("not enough data") + } + b.u64 = le64(data[:8]) + data = data[8:] + case 2: + var n uint64 + n, data, err = decodeVarint(data) + if err != nil { + return nil, err + } + if n > uint64(len(data)) { + return nil, errors.New("too much data") + } + b.data = data[:n] + data = data[n:] + case 5: + if len(data) < 4 { + return nil, errors.New("not enough data") + } + b.u64 = uint64(le32(data[:4])) + data = data[4:] + default: + return nil, fmt.Errorf("unknown wire type: %d", b.typ) + } + + return data, nil +} + +func checkType(b *buffer, typ int) error { + if b.typ != typ { + return errors.New("type mismatch") + } + return nil +} + +func decodeMessage(b *buffer, m message) error { + if err := checkType(b, 2); err != nil { + return err + } + dec := m.decoder() + data := b.data + for len(data) > 0 { + // pull varint field# + type + var err error + data, err = decodeField(b, data) + if err != nil { + return err + } + if b.field >= len(dec) || dec[b.field] == nil { + continue + } + if err := dec[b.field](b, m); err != nil { + return err + } + } + return nil +} + +func decodeInt64(b *buffer, x *int64) error { + if err := checkType(b, 0); err != nil { + return err + } + *x = int64(b.u64) + return nil +} + +func decodeInt64s(b *buffer, x *[]int64) error { + if b.typ == 2 { + // Packed encoding + data := b.data + for len(data) > 0 { + var u uint64 + var err error + + if u, data, err = decodeVarint(data); err != nil { + return err + } + *x = append(*x, int64(u)) + } + return nil + } + var i int64 + if err := decodeInt64(b, &i); err != nil { + return err + } + *x = append(*x, i) + return nil +} + +func decodeUint64(b *buffer, x *uint64) error { + if err := checkType(b, 0); err != nil { + return err + } + *x = b.u64 + return nil +} + +func decodeUint64s(b *buffer, x *[]uint64) error { + if b.typ == 2 { + data := b.data + // Packed encoding + for 
len(data) > 0 { + var u uint64 + var err error + + if u, data, err = decodeVarint(data); err != nil { + return err + } + *x = append(*x, u) + } + return nil + } + var u uint64 + if err := decodeUint64(b, &u); err != nil { + return err + } + *x = append(*x, u) + return nil +} + +func decodeString(b *buffer, x *string) error { + if err := checkType(b, 2); err != nil { + return err + } + *x = string(b.data) + return nil +} + +func decodeStrings(b *buffer, x *[]string) error { + var s string + if err := decodeString(b, &s); err != nil { + return err + } + *x = append(*x, s) + return nil +} + +func decodeBool(b *buffer, x *bool) error { + if err := checkType(b, 0); err != nil { + return err + } + if int64(b.u64) == 0 { + *x = false + } else { + *x = true + } + return nil +} diff --git a/vendor/github.com/google/pprof/profile/prune.go b/vendor/github.com/google/pprof/profile/prune.go new file mode 100644 index 00000000..b2f9fd54 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/prune.go @@ -0,0 +1,194 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Implements methods to remove frames from profiles. + +package profile + +import ( + "fmt" + "regexp" + "strings" +) + +var ( + reservedNames = []string{"(anonymous namespace)", "operator()"} + bracketRx = func() *regexp.Regexp { + var quotedNames []string + for _, name := range append(reservedNames, "(") { + quotedNames = append(quotedNames, regexp.QuoteMeta(name)) + } + return regexp.MustCompile(strings.Join(quotedNames, "|")) + }() +) + +// simplifyFunc does some primitive simplification of function names. +func simplifyFunc(f string) string { + // Account for leading '.' on the PPC ELF v1 ABI. + funcName := strings.TrimPrefix(f, ".") + // Account for unsimplified names -- try to remove the argument list by trimming + // starting from the first '(', but skipping reserved names that have '('. + for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) { + foundReserved := false + for _, res := range reservedNames { + if funcName[ind[0]:ind[1]] == res { + foundReserved = true + break + } + } + if !foundReserved { + funcName = funcName[:ind[0]] + break + } + } + return funcName +} + +// Prune removes all nodes beneath a node matching dropRx, and not +// matching keepRx. If the root node of a Sample matches, the sample +// will have an empty stack. +func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) { + prune := make(map[uint64]bool) + pruneBeneath := make(map[uint64]bool) + + // simplifyFunc can be expensive, so cache results. + // Note that the same function name can be encountered many times due + // different lines and addresses in the same function. 
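+	// The cache is keyed by the raw name, so repeat lookups for the same
+	// function skip both simplifyFunc and the regexp matching.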
+ pruneCache := map[string]bool{} // Map from function to whether or not to prune + pruneFromHere := func(s string) bool { + if r, ok := pruneCache[s]; ok { + return r + } + funcName := simplifyFunc(s) + if dropRx.MatchString(funcName) { + if keepRx == nil || !keepRx.MatchString(funcName) { + pruneCache[s] = true + return true + } + } + pruneCache[s] = false + return false + } + + for _, loc := range p.Location { + var i int + for i = len(loc.Line) - 1; i >= 0; i-- { + if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { + if pruneFromHere(fn.Name) { + break + } + } + } + + if i >= 0 { + // Found matching entry to prune. + pruneBeneath[loc.ID] = true + + // Remove the matching location. + if i == len(loc.Line)-1 { + // Matched the top entry: prune the whole location. + prune[loc.ID] = true + } else { + loc.Line = loc.Line[i+1:] + } + } + } + + // Prune locs from each Sample + for _, sample := range p.Sample { + // Scan from the root to the leaves to find the prune location. + // Do not prune frames before the first user frame, to avoid + // pruning everything. + foundUser := false + for i := len(sample.Location) - 1; i >= 0; i-- { + id := sample.Location[i].ID + if !prune[id] && !pruneBeneath[id] { + foundUser = true + continue + } + if !foundUser { + continue + } + if prune[id] { + sample.Location = sample.Location[i+1:] + break + } + if pruneBeneath[id] { + sample.Location = sample.Location[i:] + break + } + } + } +} + +// RemoveUninteresting prunes and elides profiles using built-in +// tables of uninteresting function names. +func (p *Profile) RemoveUninteresting() error { + var keep, drop *regexp.Regexp + var err error + + if p.DropFrames != "" { + if drop, err = regexp.Compile("^(" + p.DropFrames + ")$"); err != nil { + return fmt.Errorf("failed to compile regexp %s: %v", p.DropFrames, err) + } + if p.KeepFrames != "" { + if keep, err = regexp.Compile("^(" + p.KeepFrames + ")$"); err != nil { + return fmt.Errorf("failed to compile regexp %s: %v", p.KeepFrames, err) + } + } + p.Prune(drop, keep) + } + return nil +} + +// PruneFrom removes all nodes beneath the lowest node matching dropRx, not including itself. +// +// Please see the example below to understand this method as well as +// the difference from Prune method. +// +// A sample contains Location of [A,B,C,B,D] where D is the top frame and there's no inline. +// +// PruneFrom(A) returns [A,B,C,B,D] because there's no node beneath A. +// Prune(A, nil) returns [B,C,B,D] by removing A itself. +// +// PruneFrom(B) returns [B,C,B,D] by removing all nodes beneath the first B when scanning from the bottom. +// Prune(B, nil) returns [D] because a matching node is found by scanning from the root. +func (p *Profile) PruneFrom(dropRx *regexp.Regexp) { + pruneBeneath := make(map[uint64]bool) + + for _, loc := range p.Location { + for i := 0; i < len(loc.Line); i++ { + if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { + funcName := simplifyFunc(fn.Name) + if dropRx.MatchString(funcName) { + // Found matching entry to prune. + pruneBeneath[loc.ID] = true + loc.Line = loc.Line[i:] + break + } + } + } + } + + // Prune locs from each Sample + for _, sample := range p.Sample { + // Scan from the bottom leaf to the root to find the prune location. 
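+		// Unlike Prune, the frame that matches dropRx is kept: the
+		// slice below starts at i rather than i+1.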
+ for i, loc := range sample.Location { + if pruneBeneath[loc.ID] { + sample.Location = sample.Location[i:] + break + } + } + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index 05a41bd6..9d901bd2 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,191 @@ +## 2.8.4 + +### Features +- Add OmitSuiteSetupNodes to JunitReportConfig (#1147) [979fbc2] +- Add a reference to ginkgolinter in docs.index.md (#1143) [8432589] + +### Fixes +- rename tools hack to see if it fixes things for downstream users [a8bb39a] + +### Maintenance +- Bump golang.org/x/text (#1144) [41b2a8a] +- Bump github.com/onsi/gomega from 1.27.0 to 1.27.1 (#1142) [7c4f583] + +## 2.8.3 + +Released to fix security issue in golang.org/x/net dependency + +### Maintenance + +- Bump golang.org/x/net from 0.6.0 to 0.7.0 (#1141) [fc1a02e] +- remove tools.go hack from documentation [0718693] + +## 2.8.2 + +Ginkgo now includes a `tools.go` file in the root directory of the `ginkgo` package. This should allow modules that simply `go get github.com/onsi/ginkgo/v2` to also pull in the CLI dependencies. This obviates the need for consumers of Ginkgo to have their own `tools.go` file and makes it simpler to ensure that the version of the `ginkgo` CLI being used matches the version of the library. You can simply run `go run github.com/onsi/ginkgo/v2/ginkgo` to run the version of the cli associated with your package go.mod. + +### Maintenance + +- Bump github.com/onsi/gomega from 1.26.0 to 1.27.0 (#1139) [5767b0a] +- Fix minor typos (#1138) [e1e9723] +- Fix link in V2 Migration Guide (#1137) [a588f60] + +## 2.8.1 + +### Fixes +- lock around default report output to avoid triggering the race detector when calling By from goroutines [2d5075a] +- don't run ReportEntries through sprintf [febbe38] + +### Maintenance +- Bump golang.org/x/tools from 0.5.0 to 0.6.0 (#1135) [11a4860] +- test: update matrix for Go 1.20 (#1130) [4890a62] +- Bump golang.org/x/sys from 0.4.0 to 0.5.0 (#1133) [a774638] +- Bump github.com/onsi/gomega from 1.25.0 to 1.26.0 (#1120) [3f233bd] +- Bump github-pages from 227 to 228 in /docs (#1131) [f9b8649] +- Bump activesupport from 6.0.6 to 6.0.6.1 in /docs (#1127) [6f8c042] +- Update index.md with instructions on how to upgrade Ginkgo [833a75e] + +## 2.8.0 + +### Features + +- Introduce GinkgoHelper() to track and exclude helper functions from potential CodeLocations [e19f556] + +Modeled after `testing.T.Helper()`. Now, rather than write code like: + +```go +func helper(model Model) { + Expect(model).WithOffset(1).To(BeValid()) + Expect(model.SerialNumber).WithOffset(1).To(MatchRegexp(/[a-f0-9]*/)) +} +``` + +you can stop tracking offsets (which makes nesting composing helpers nearly impossible) and simply write: + +```go +func helper(model Model) { + GinkgoHelper() + Expect(model).To(BeValid()) + Expect(model.SerialNumber).To(MatchRegexp(/[a-f0-9]*/)) +} +``` + +- Introduce GinkgoLabelFilter() and Label().MatchesLabelFilter() to make it possible to programmatically match filters (fixes #1119) [2f6597c] + +You can now write code like this: + +```go +BeforeSuite(func() { + if Label("slow").MatchesLabelFilter(GinkgoLabelFilter()) { + // do slow setup + } + + if Label("fast").MatchesLabelFilter(GinkgoLabelFilter()) { + // do fast setup + } +}) +``` + +to programmatically check whether a given set of labels will match the configured `--label-filter`. 
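+
+For instance - an illustrative sketch, not from this release; `StartTestServer`
+is a hypothetical helper - you can pair the check with `DeferCleanup` so an
+expensive fixture is only started (and stopped) when the current filter will
+actually select the specs that need it:
+
+```go
+var _ = BeforeSuite(func() {
+	if Label("integration").MatchesLabelFilter(GinkgoLabelFilter()) {
+		server := StartTestServer() // hypothetical expensive fixture
+		DeferCleanup(server.Stop)   // torn down after the suite
+	}
+})
+```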
+
+### Maintenance
+
+- Bump webrick from 1.7.0 to 1.8.1 in /docs (#1125) [ea4966e]
+- cdeql: add ruby language (#1124) [9dd275b]
+- dependabot: add bundler package-ecosystem for docs (#1123) [14e7bdd]
+
+## 2.7.1
+
+### Fixes
+- Bring back SuiteConfig.EmitSpecProgress to avoid compilation issue for consumers that set it manually [d2a1cb0]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.24.2 to 1.25.0 (#1118) [cafece6]
+- Bump golang.org/x/tools from 0.4.0 to 0.5.0 (#1111) [eda66c2]
+- Bump golang.org/x/sys from 0.3.0 to 0.4.0 (#1112) [ac5ccaa]
+- Bump github.com/onsi/gomega from 1.24.1 to 1.24.2 (#1097) [eee6480]
+
+## 2.7.0
+
+### Features
+- Introduce ContinueOnFailure for Ordered containers [e0123ca] - Ordered containers that are also decorated with ContinueOnFailure will not stop running specs after the first spec fails.
+- Support for bootstrap commands to use custom data for templates (#1110) [7a2b242]
+- Support for labels and pending decorator in ginkgo outline output (#1113) [e6e3b98]
+- Color aliases for custom color support (#1101) [49fab7a]
+
+### Fixes
+- correctly ensure deterministic spec order, even if specs are generated by iterating over a map [89dda20]
+- Fix a bug where timed-out specs were not correctly treated as failures when determining whether or not to run AfterAlls in an Ordered container.
+- Ensure go test coverprofile outputs to the expected location (#1105) [b0bd77b]
+
+## 2.6.1
+
+### Features
+- Override formatter colors from envvars - this is a new feature but an alternative approach involving config files might be taken in the future (#1095) [60240d1]
+
+### Fixes
+- GinkgoRecover now supports ignoring panics that match a specific, hidden, interface [301f3e2]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.24.0 to 1.24.1 (#1077) [3643823]
+- Bump golang.org/x/tools from 0.2.0 to 0.4.0 (#1090) [f9f856e]
+- Bump nokogiri from 1.13.9 to 1.13.10 in /docs (#1091) [0d7087e]
+
+## 2.6.0
+
+### Features
+- `ReportBeforeSuite` provides access to the suite report before the suite begins.
+- Add junit config option for omitting leafnodetype (#1088) [956e6d2]
+- Add support to customize junit report config to omit spec labels (#1087) [de44005]
+
+### Fixes
+- Fix stack trace pruning so that it has a chance of working on windows [2165648]
+
+## 2.5.1
+
+### Fixes
+- skipped tests only show as 'S' when running with -v [3ab38ae]
+- Fix typo in docs/index.md (#1082) [55fc58d]
+- Fix typo in docs/index.md (#1081) [8a14f1f]
+- Fix link notation in docs/index.md (#1080) [2669612]
+- Fix typo in `--progress` deprecation message (#1076) [b4b7edc]
+
+### Maintenance
+- chore: Included githubactions in the dependabot config (#976) [baea341]
+- Bump golang.org/x/sys from 0.1.0 to 0.2.0 (#1075) [9646297]
+
+## 2.5.0
+
+### Ginkgo output now includes a timeline-view of the spec
+
+This commit changes Ginkgo's default output. Spec details are now
+presented as a **timeline** that includes events that occur during the spec
+lifecycle interleaved with any GinkgoWriter content. This makes it much easier
+to understand the flow of a spec and where a given failure occurs.
+
+The --progress, --slow-spec-threshold, --always-emit-ginkgo-writer flags
+and the SuppressProgressReporting decorator have all been deprecated. Instead,
+the existing -v and -vv flags better capture the level of verbosity to display. However,
+a new --show-node-events flag is added to include node `> Enter` and `< Exit` events
+in the spec timeline.
+ +In addition, JUnit reports now include the timeline (rendered with -vv) and custom JUnit +reports can be configured and generated using +`GenerateJUnitReportWithConfig(report types.Report, dst string, config JunitReportConfig)` + +Code should continue to work unchanged with this version of Ginkgo - however if you have tooling that +was relying on the specific output format of Ginkgo you _may_ run into issues. Ginkgo's console output is not guaranteed to be stable for tooling and automation purposes. You should, instead, use Ginkgo's JSON format +to build tooling on top of as it has stronger guarantees to be stable from version to version. + +### Features +- Provide details about which timeout expired [0f2fa27] + +### Fixes +- Add Support Policy to docs [c70867a] + +### Maintenance +- Bump github.com/onsi/gomega from 1.22.1 to 1.23.0 (#1070) [bb3b4e2] + ## 2.4.0 ### Features @@ -8,7 +196,7 @@ ### Fixes - correcting some typos (#1064) [1403d3c] -- fix flaky internal_integration interupt specs [2105ba3] +- fix flaky internal_integration interrupt specs [2105ba3] - Correct busted link in README [be6b5b9] ### Maintenance diff --git a/vendor/github.com/onsi/ginkgo/v2/README.md b/vendor/github.com/onsi/ginkgo/v2/README.md index eda4116a..d0473a46 100644 --- a/vendor/github.com/onsi/ginkgo/v2/README.md +++ b/vendor/github.com/onsi/ginkgo/v2/README.md @@ -4,11 +4,7 @@ --- -# Ginkgo 2.0 is now Generally Available! - -You can learn more about 2.0 in the [Migration Guide](https://onsi.github.io/ginkgo/MIGRATING_TO_V2)! - ---- +# Ginkgo Ginkgo is a mature testing framework for Go designed to help you write expressive specs. Ginkgo builds on top of Go's `testing` foundation and is complemented by the [Gomega](https://github.com/onsi/gomega) matcher library. Together, Ginkgo and Gomega let you express the intent behind your specs clearly: @@ -33,53 +29,53 @@ Describe("Checking books out of the library", Label("library"), func() { }) When("the library has the book in question", func() { - BeforeEach(func() { - Expect(library.Store(book)).To(Succeed()) + BeforeEach(func(ctx SpecContext) { + Expect(library.Store(ctx, book)).To(Succeed()) }) Context("and the book is available", func() { - It("lends it to the reader", func() { - Expect(valjean.Checkout(library, "Les Miserables")).To(Succeed()) + It("lends it to the reader", func(ctx SpecContext) { + Expect(valjean.Checkout(ctx, library, "Les Miserables")).To(Succeed()) Expect(valjean.Books()).To(ContainElement(book)) - Expect(library.UserWithBook(book)).To(Equal(valjean)) - }) + Expect(library.UserWithBook(ctx, book)).To(Equal(valjean)) + }, SpecTimeout(time.Second * 5)) }) Context("but the book has already been checked out", func() { var javert *users.User - BeforeEach(func() { + BeforeEach(func(ctx SpecContext) { javert = users.NewUser("Javert") - Expect(javert.Checkout(library, "Les Miserables")).To(Succeed()) + Expect(javert.Checkout(ctx, library, "Les Miserables")).To(Succeed()) }) - It("tells the user", func() { - err := valjean.Checkout(library, "Les Miserables") + It("tells the user", func(ctx SpecContext) { + err := valjean.Checkout(ctx, library, "Les Miserables") Expect(error).To(MatchError("Les Miserables is currently checked out")) - }) + }, SpecTimeout(time.Second * 5)) - It("lets the user place a hold and get notified later", func() { - Expect(valjean.Hold(library, "Les Miserables")).To(Succeed()) - Expect(valjean.Holds()).To(ContainElement(book)) + It("lets the user place a hold and get notified later", func(ctx SpecContext) { + 
Expect(valjean.Hold(ctx, library, "Les Miserables")).To(Succeed()) + Expect(valjean.Holds(ctx)).To(ContainElement(book)) By("when Javert returns the book") - Expect(javert.Return(library, book)).To(Succeed()) + Expect(javert.Return(ctx, library, book)).To(Succeed()) By("it eventually informs Valjean") notification := "Les Miserables is ready for pick up" - Eventually(valjean.Notifications).Should(ContainElement(notification)) + Eventually(ctx, valjean.Notifications).Should(ContainElement(notification)) - Expect(valjean.Checkout(library, "Les Miserables")).To(Succeed()) - Expect(valjean.Books()).To(ContainElement(book)) - Expect(valjean.Holds()).To(BeEmpty()) - }) + Expect(valjean.Checkout(ctx, library, "Les Miserables")).To(Succeed()) + Expect(valjean.Books(ctx)).To(ContainElement(book)) + Expect(valjean.Holds(ctx)).To(BeEmpty()) + }, SpecTimeout(time.Second * 10)) }) }) When("the library does not have the book in question", func() { - It("tells the reader the book is unavailable", func() { - err := valjean.Checkout(library, "Les Miserables") + It("tells the reader the book is unavailable", func(ctx SpecContext) { + err := valjean.Checkout(ctx, library, "Les Miserables") Expect(error).To(MatchError("Les Miserables is not in the library catalog")) - }) + }, SpecTimeout(time.Second * 5)) }) }) ``` @@ -92,7 +88,7 @@ If you have a question, comment, bug report, feature request, etc. please open a Whether writing basic unit specs, complex integration specs, or even performance specs - Ginkgo gives you an expressive Domain-Specific Language (DSL) that will be familiar to users coming from frameworks such as [Quick](https://github.com/Quick/Quick), [RSpec](https://rspec.info), [Jasmine](https://jasmine.github.io), and [Busted](https://lunarmodules.github.io/busted/). This style of testing is sometimes referred to as "Behavior-Driven Development" (BDD) though Ginkgo's utility extends beyond acceptance-level testing. -With Ginkgo's DSL you can use nestable [`Describe`, `Context` and `When` container nodes](https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes) to help you organize your specs. [`BeforeEach` and `AfterEach` setup nodes](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and cleanup. [`It` and `Specify` subject nodes](https://onsi.github.io/ginkgo/#spec-subjects-it) that hold your assertions. [`BeforeSuite` and `AfterSuite` nodes](https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite) to prep for and cleanup after a suite... and [much more!](https://onsi.github.io/ginkgo/#writing-specs) +With Ginkgo's DSL you can use nestable [`Describe`, `Context` and `When` container nodes](https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes) to help you organize your specs. [`BeforeEach` and `AfterEach` setup nodes](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and cleanup. [`It` and `Specify` subject nodes](https://onsi.github.io/ginkgo/#spec-subjects-it) that hold your assertions. [`BeforeSuite` and `AfterSuite` nodes](https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite) to prep for and cleanup after a suite... and [much more!](https://onsi.github.io/ginkgo/#writing-specs). At runtime, Ginkgo can run your specs in reproducibly [random order](https://onsi.github.io/ginkgo/#spec-randomization) and has sophisticated support for [spec parallelization](https://onsi.github.io/ginkgo/#spec-parallelization). 
In fact, running specs in parallel is as easy as @@ -100,7 +96,7 @@ At runtime, Ginkgo can run your specs in reproducibly [random order](https://ons ginkgo -p ``` -By following [established patterns for writing parallel specs](https://onsi.github.io/ginkgo/#patterns-for-parallel-integration-specs) you can build even large, complex integration suites that parallelize cleanly and run performantly. +By following [established patterns for writing parallel specs](https://onsi.github.io/ginkgo/#patterns-for-parallel-integration-specs) you can build even large, complex integration suites that parallelize cleanly and run performantly. And you don't have to worry about your spec suite hanging or leaving a mess behind - Ginkgo provides a per-node `context.Context` and the capability to interrupt the spec after a set period of time - and then clean up. As your suites grow Ginkgo helps you keep your specs organized with [labels](https://onsi.github.io/ginkgo/#spec-labels) and lets you easily run [subsets of specs](https://onsi.github.io/ginkgo/#filtering-specs), either [programmatically](https://onsi.github.io/ginkgo/#focused-specs) or on the [command line](https://onsi.github.io/ginkgo/#combining-filters). And Ginkgo's reporting infrastructure generates machine-readable output in a [variety of formats](https://onsi.github.io/ginkgo/#generating-machine-readable-reports) _and_ allows you to build your own [custom reporting infrastructure](https://onsi.github.io/ginkgo/#generating-reports-programmatically). diff --git a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go index 4ea63b84..0b13b0d2 100644 --- a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go @@ -21,7 +21,6 @@ import ( "os" "path/filepath" "strings" - "time" "github.com/go-logr/logr" "github.com/onsi/ginkgo/v2/formatter" @@ -93,11 +92,11 @@ type GinkgoWriterInterface interface { } /* -SpecContext is the context object passed into nodes that are subject to a timeout or need to be notified of an interrupt. It implements the standard context.Context interface but also contains additional helpers to provide an extensibility point for Ginkgo. (As an example, Gomega's Eventually can use the methods defined on SpecContext to provide deeper integratoin with Ginkgo). +SpecContext is the context object passed into nodes that are subject to a timeout or need to be notified of an interrupt. It implements the standard context.Context interface but also contains additional helpers to provide an extensibility point for Ginkgo. (As an example, Gomega's Eventually can use the methods defined on SpecContext to provide deeper integration with Ginkgo). You can do anything with SpecContext that you do with a typical context.Context including wrapping it with any of the context.With* methods. -Ginkgo will cancel the SpecContext when a node is interrupted (e.g. by the user sending an interupt signal) or when a node has exceeded it's allowed run-time. Note, however, that even in cases where a node has a deadline, SpecContext will not return a deadline via .Deadline(). This is because Ginkgo does not use a WithDeadline() context to model node deadlines as Ginkgo needs control over the precise timing of the context cancellation to ensure it can provide an accurate progress report at the moment of cancellation. +Ginkgo will cancel the SpecContext when a node is interrupted (e.g. by the user sending an interrupt signal) or when a node has exceeded its allowed run-time. 
Note, however, that even in cases where a node has a deadline, SpecContext will not return a deadline via .Deadline(). This is because Ginkgo does not use a WithDeadline() context to model node deadlines as Ginkgo needs control over the precise timing of the context cancellation to ensure it can provide an accurate progress report at the moment of cancellation.
 */
 type SpecContext = internal.SpecContext
 
@@ -164,6 +163,29 @@ func GinkgoParallelProcess() int {
 	return suiteConfig.ParallelProcess
 }
 
+/*
+GinkgoHelper marks the function it's called in as a test helper. When a failure occurs inside a helper function, Ginkgo will skip the helper when analyzing the stack trace to identify where the failure occurred.
+
+This is an alternative, simpler, mechanism to passing in a skip offset when calling Fail or using Gomega.
+*/
+func GinkgoHelper() {
+	types.MarkAsHelper(1)
+}
+
+/*
+GinkgoLabelFilter() returns the label filter configured for this suite via `--label-filter`.
+
+You can use this to manually check if a set of labels would satisfy the filter via:
+
+	if (Label("cat", "dog").MatchesLabelFilter(GinkgoLabelFilter())) {
+		//...
+	}
+*/
+func GinkgoLabelFilter() string {
+	suiteConfig, _ := GinkgoConfiguration()
+	return suiteConfig.LabelFilter
+}
+
 /*
 PauseOutputInterception() pauses Ginkgo's output interception. This is only relevant
 when running in parallel and output to stdout/stderr is being intercepted. You generally
@@ -276,7 +298,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {
 	}
 
 	writer := GinkgoWriter.(*internal.Writer)
-	if reporterConfig.Verbose && suiteConfig.ParallelTotal == 1 {
+	if reporterConfig.Verbosity().GTE(types.VerbosityLevelVerbose) && suiteConfig.ParallelTotal == 1 {
 		writer.SetMode(internal.WriterModeStreamAndBuffer)
 	} else {
 		writer.SetMode(internal.WriterModeBufferOnly)
@@ -370,6 +392,12 @@ func AbortSuite(message string, callerSkip ...int) {
 	panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl))
 }
 
+/*
+ignorablePanic is used by Gomega to signal to GinkgoRecover that Gomega is handling
+the error associated with this panic. It is used when Eventually/Consistently are passed a func(g Gomega) and the resulting function launches a goroutine that makes a failed assertion. That failed assertion is registered by Gomega and then panics. Ordinarily the panic is captured by Gomega. In the case of a goroutine Gomega can't capture the panic - so we piggy back on GinkgoRecover so users have a single defer GinkgoRecover() pattern to follow. To do that we need to tell Ginkgo to ignore this panic and not register it as a panic on the global Failer.
+*/ +type ignorablePanic interface{ GinkgoRecoverShouldIgnoreThisPanic() } + /* GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail` Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that @@ -385,6 +413,9 @@ You can learn more about how Ginkgo manages failures here: https://onsi.github.i func GinkgoRecover() { e := recover() if e != nil { + if _, ok := e.(ignorablePanic); ok { + return + } global.Failer.Panic(types.NewCodeLocationWithStackTrace(1), e) } } @@ -513,31 +544,7 @@ Note that By does not generate a new Ginkgo node - rather it is simply synctacti You can learn more about By here: https://onsi.github.io/ginkgo/#documenting-complex-specs-by */ func By(text string, callback ...func()) { - if !global.Suite.InRunPhase() { - exitIfErr(types.GinkgoErrors.ByNotDuringRunPhase(types.NewCodeLocation(1))) - } - value := struct { - Text string - Duration time.Duration - }{ - Text: text, - } - t := time.Now() - global.Suite.SetProgressStepCursor(internal.ProgressStepCursor{ - Text: text, - CodeLocation: types.NewCodeLocation(1), - StartTime: t, - }) - AddReportEntry("By Step", ReportEntryVisibilityNever, Offset(1), &value, t) - formatter := formatter.NewWithNoColorBool(reporterConfig.NoColor) - GinkgoWriter.Println(formatter.F("{{bold}}STEP:{{/}} %s {{gray}}%s{{/}}", text, t.Format(types.GINKGO_TIME_FORMAT))) - if len(callback) == 1 { - callback[0]() - value.Duration = time.Since(t) - } - if len(callback) > 1 { - panic("just one callback per By, please") - } + exitIfErr(global.Suite.By(text, callback...)) } /* @@ -736,7 +743,7 @@ For example: os.SetEnv("FOO", "BAR") }) -will register a cleanup handler that will set the environment variable "FOO" to it's current value (obtained by os.GetEnv("FOO")) after the spec runs and then sets the environment variable "FOO" to "BAR" for the current spec. +will register a cleanup handler that will set the environment variable "FOO" to its current value (obtained by os.GetEnv("FOO")) after the spec runs and then sets the environment variable "FOO" to "BAR" for the current spec. Similarly: diff --git a/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go b/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go index e43d9cbb..c65af4ce 100644 --- a/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go @@ -46,7 +46,7 @@ const Pending = internal.Pending /* Serial is a decorator that allows you to mark a spec or container as serial. These specs will never run in parallel with other specs. -Tests in ordered containers cannot be marked as serial - mark the ordered container instead. +Specs in ordered containers cannot be marked as serial - mark the ordered container instead. You can learn more here: https://onsi.github.io/ginkgo/#serial-specs You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference @@ -54,7 +54,7 @@ You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorat const Serial = internal.Serial /* -Ordered is a decorator that allows you to mark a container as ordered. Tests in the container will always run in the order they appear. +Ordered is a decorator that allows you to mark a container as ordered. Specs in the container will always run in the order they appear. They will never be randomized and they will never run in parallel with one another, though they may run in parallel with other specs. 
You can learn more here: https://onsi.github.io/ginkgo/#ordered-containers @@ -62,6 +62,16 @@ You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorat */ const Ordered = internal.Ordered +/* +ContinueOnFailure is a decorator that allows you to mark an Ordered container to continue running specs even if failures occur. Ordinarily an ordered container will stop running specs after the first failure occurs. Note that if a BeforeAll or a BeforeEach/JustBeforeEach annotated with OncePerOrdered fails then no specs will run as the precondition for the Ordered container will consider to be failed. + +ContinueOnFailure only applies to the outermost Ordered container. Attempting to place ContinueOnFailure in a nested container will result in an error. + +You can learn more here: https://onsi.github.io/ginkgo/#ordered-containers +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +const ContinueOnFailure = internal.ContinueOnFailure + /* OncePerOrdered is a decorator that allows you to mark outer BeforeEach, AfterEach, JustBeforeEach, and JustAfterEach setup nodes to run once per ordered context. Normally these setup nodes run around each individual spec, with OncePerOrdered they will run once around the set of specs in an ordered container. diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go index 43b16211..743555dd 100644 --- a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go +++ b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "regexp" + "strconv" "strings" ) @@ -50,6 +51,37 @@ func NewWithNoColorBool(noColor bool) Formatter { } func New(colorMode ColorMode) Formatter { + colorAliases := map[string]int{ + "black": 0, + "red": 1, + "green": 2, + "yellow": 3, + "blue": 4, + "magenta": 5, + "cyan": 6, + "white": 7, + } + for colorAlias, n := range colorAliases { + colorAliases[fmt.Sprintf("bright-%s", colorAlias)] = n + 8 + } + + getColor := func(color, defaultEscapeCode string) string { + color = strings.ToUpper(strings.ReplaceAll(color, "-", "_")) + envVar := fmt.Sprintf("GINKGO_CLI_COLOR_%s", color) + envVarColor := os.Getenv(envVar) + if envVarColor == "" { + return defaultEscapeCode + } + if colorCode, ok := colorAliases[envVarColor]; ok { + return fmt.Sprintf("\x1b[38;5;%dm", colorCode) + } + colorCode, err := strconv.Atoi(envVarColor) + if err != nil || colorCode < 0 || colorCode > 255 { + return defaultEscapeCode + } + return fmt.Sprintf("\x1b[38;5;%dm", colorCode) + } + f := Formatter{ ColorMode: colorMode, colors: map[string]string{ @@ -57,18 +89,18 @@ func New(colorMode ColorMode) Formatter { "bold": "\x1b[1m", "underline": "\x1b[4m", - "red": "\x1b[38;5;9m", - "orange": "\x1b[38;5;214m", - "coral": "\x1b[38;5;204m", - "magenta": "\x1b[38;5;13m", - "green": "\x1b[38;5;10m", - "dark-green": "\x1b[38;5;28m", - "yellow": "\x1b[38;5;11m", - "light-yellow": "\x1b[38;5;228m", - "cyan": "\x1b[38;5;14m", - "gray": "\x1b[38;5;243m", - "light-gray": "\x1b[38;5;246m", - "blue": "\x1b[38;5;12m", + "red": getColor("red", "\x1b[38;5;9m"), + "orange": getColor("orange", "\x1b[38;5;214m"), + "coral": getColor("coral", "\x1b[38;5;204m"), + "magenta": getColor("magenta", "\x1b[38;5;13m"), + "green": getColor("green", "\x1b[38;5;10m"), + "dark-green": getColor("dark-green", "\x1b[38;5;28m"), + "yellow": getColor("yellow", "\x1b[38;5;11m"), + "light-yellow": getColor("light-yellow", "\x1b[38;5;228m"), + "cyan": 
getColor("cyan", "\x1b[38;5;14m"), + "gray": getColor("gray", "\x1b[38;5;243m"), + "light-gray": getColor("light-gray", "\x1b[38;5;246m"), + "blue": getColor("blue", "\x1b[38;5;12m"), }, } colors := []string{} @@ -88,7 +120,10 @@ func (f Formatter) Fi(indentation uint, format string, args ...interface{}) stri } func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string { - out := fmt.Sprintf(f.style(format), args...) + out := f.style(format) + if len(args) > 0 { + out = fmt.Sprintf(out, args...) + } if indentation == 0 && maxWidth == 0 { return out diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go new file mode 100644 index 00000000..5db5d1a7 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go @@ -0,0 +1,63 @@ +package build + +import ( + "fmt" + + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildBuildCommand() command.Command { + var cliConfig = types.NewDefaultCLIConfig() + var goFlagsConfig = types.NewDefaultGoFlagsConfig() + + flags, err := types.BuildBuildCommandFlagSet(&cliConfig, &goFlagsConfig) + if err != nil { + panic(err) + } + + return command.Command{ + Name: "build", + Flags: flags, + Usage: "ginkgo build ", + ShortDoc: "Build the passed in (or the package in the current directory if left blank).", + DocLink: "precompiling-suites", + Command: func(args []string, _ []string) { + var errors []error + cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig) + command.AbortIfErrors("Ginkgo detected configuration issues:", errors) + + buildSpecs(args, cliConfig, goFlagsConfig) + }, + } +} + +func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig) { + suites := internal.FindSuites(args, cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + + internal.VerifyCLIAndFrameworkVersion(suites) + + opc := internal.NewOrderedParallelCompiler(cliConfig.ComputedNumCompilers()) + opc.StartCompiling(suites, goFlagsConfig) + + for { + suiteIdx, suite := opc.Next() + if suiteIdx >= len(suites) { + break + } + suites[suiteIdx] = suite + if suite.State.Is(internal.TestSuiteStateFailedToCompile) { + fmt.Println(suite.CompilationError.Error()) + } else { + fmt.Printf("Compiled %s.test\n", suite.PackageName) + } + } + + if suites.CountWithState(internal.TestSuiteStateFailedToCompile) > 0 { + command.AbortWith("Failed to compile all tests") + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go new file mode 100644 index 00000000..2efd2860 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go @@ -0,0 +1,61 @@ +package command + +import "fmt" + +type AbortDetails struct { + ExitCode int + Error error + EmitUsage bool +} + +func Abort(details AbortDetails) { + panic(details) +} + +func AbortGracefullyWith(format string, args ...interface{}) { + Abort(AbortDetails{ + ExitCode: 0, + Error: fmt.Errorf(format, args...), + EmitUsage: false, + }) +} + +func AbortWith(format string, args ...interface{}) { + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf(format, args...), + EmitUsage: false, + }) +} + +func AbortWithUsage(format string, args ...interface{}) { + Abort(AbortDetails{ + 
ExitCode: 1, + Error: fmt.Errorf(format, args...), + EmitUsage: true, + }) +} + +func AbortIfError(preamble string, err error) { + if err != nil { + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf("%s\n%s", preamble, err.Error()), + EmitUsage: false, + }) + } +} + +func AbortIfErrors(preamble string, errors []error) { + if len(errors) > 0 { + out := "" + for _, err := range errors { + out += err.Error() + } + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf("%s\n%s", preamble, out), + EmitUsage: false, + }) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go new file mode 100644 index 00000000..12e0e565 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go @@ -0,0 +1,50 @@ +package command + +import ( + "fmt" + "io" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +type Command struct { + Name string + Flags types.GinkgoFlagSet + Usage string + ShortDoc string + Documentation string + DocLink string + Command func(args []string, additionalArgs []string) +} + +func (c Command) Run(args []string, additionalArgs []string) { + args, err := c.Flags.Parse(args) + if err != nil { + AbortWithUsage(err.Error()) + } + + c.Command(args, additionalArgs) +} + +func (c Command) EmitUsage(writer io.Writer) { + fmt.Fprintln(writer, formatter.F("{{bold}}"+c.Usage+"{{/}}")) + fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(c.Usage)))) + if c.ShortDoc != "" { + fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.ShortDoc)) + fmt.Fprintln(writer, "") + } + if c.Documentation != "" { + fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.Documentation)) + fmt.Fprintln(writer, "") + } + if c.DocLink != "" { + fmt.Fprintln(writer, formatter.Fi(0, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}http://onsi.github.io/ginkgo/#%s{{/}}", c.DocLink)) + fmt.Fprintln(writer, "") + } + flagUsage := c.Flags.Usage() + if flagUsage != "" { + fmt.Fprintf(writer, formatter.F(flagUsage)) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go new file mode 100644 index 00000000..88dd8d6b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go @@ -0,0 +1,182 @@ +package command + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +type Program struct { + Name string + Heading string + Commands []Command + DefaultCommand Command + DeprecatedCommands []DeprecatedCommand + + //For testing - leave as nil in production + OutWriter io.Writer + ErrWriter io.Writer + Exiter func(code int) +} + +type DeprecatedCommand struct { + Name string + Deprecation types.Deprecation +} + +func (p Program) RunAndExit(osArgs []string) { + var command Command + deprecationTracker := types.NewDeprecationTracker() + if p.Exiter == nil { + p.Exiter = os.Exit + } + if p.OutWriter == nil { + p.OutWriter = formatter.ColorableStdOut + } + if p.ErrWriter == nil { + p.ErrWriter = formatter.ColorableStdErr + } + + defer func() { + exitCode := 0 + + if r := recover(); r != nil { + details, ok := r.(AbortDetails) + if !ok { + panic(r) + } + + if details.Error != nil { + fmt.Fprintln(p.ErrWriter, formatter.F("{{red}}{{bold}}%s %s{{/}} {{red}}failed{{/}}", p.Name, command.Name)) + fmt.Fprintln(p.ErrWriter, formatter.Fi(1, details.Error.Error())) + } + if details.EmitUsage { + if 
details.Error != nil { + fmt.Fprintln(p.ErrWriter, "") + } + command.EmitUsage(p.ErrWriter) + } + exitCode = details.ExitCode + } + + command.Flags.ValidateDeprecations(deprecationTracker) + if deprecationTracker.DidTrackDeprecations() { + fmt.Fprintln(p.ErrWriter, deprecationTracker.DeprecationsReport()) + } + p.Exiter(exitCode) + return + }() + + args, additionalArgs := []string{}, []string{} + + foundDelimiter := false + for _, arg := range osArgs[1:] { + if !foundDelimiter { + if arg == "--" { + foundDelimiter = true + continue + } + } + + if foundDelimiter { + additionalArgs = append(additionalArgs, arg) + } else { + args = append(args, arg) + } + } + + command = p.DefaultCommand + if len(args) > 0 { + p.handleHelpRequestsAndExit(p.OutWriter, args) + if command.Name == args[0] { + args = args[1:] + } else { + for _, deprecatedCommand := range p.DeprecatedCommands { + if deprecatedCommand.Name == args[0] { + deprecationTracker.TrackDeprecation(deprecatedCommand.Deprecation) + return + } + } + for _, tryCommand := range p.Commands { + if tryCommand.Name == args[0] { + command, args = tryCommand, args[1:] + break + } + } + } + } + + command.Run(args, additionalArgs) +} + +func (p Program) handleHelpRequestsAndExit(writer io.Writer, args []string) { + if len(args) == 0 { + return + } + + matchesHelpFlag := func(args ...string) bool { + for _, arg := range args { + if arg == "--help" || arg == "-help" || arg == "-h" || arg == "--h" { + return true + } + } + return false + } + if len(args) == 1 { + if args[0] == "help" || matchesHelpFlag(args[0]) { + p.EmitUsage(writer) + Abort(AbortDetails{}) + } + } else { + var name string + if args[0] == "help" || matchesHelpFlag(args[0]) { + name = args[1] + } else if matchesHelpFlag(args[1:]...) { + name = args[0] + } else { + return + } + + if p.DefaultCommand.Name == name || p.Name == name { + p.DefaultCommand.EmitUsage(writer) + Abort(AbortDetails{}) + } + for _, command := range p.Commands { + if command.Name == name { + command.EmitUsage(writer) + Abort(AbortDetails{}) + } + } + + fmt.Fprintln(writer, formatter.F("{{red}}Unknown Command: {{bold}}%s{{/}}", name)) + fmt.Fprintln(writer, "") + p.EmitUsage(writer) + Abort(AbortDetails{ExitCode: 1}) + } + return +} + +func (p Program) EmitUsage(writer io.Writer) { + fmt.Fprintln(writer, formatter.F(p.Heading)) + fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(p.Heading)))) + fmt.Fprintln(writer, formatter.F("For usage information for a command, run {{bold}}%s help COMMAND{{/}}.", p.Name)) + fmt.Fprintln(writer, formatter.F("For usage information for the default command, run {{bold}}%s help %s{{/}} or {{bold}}%s help %s{{/}}.", p.Name, p.Name, p.Name, p.DefaultCommand.Name)) + fmt.Fprintln(writer, "") + fmt.Fprintln(writer, formatter.F("The following commands are available:")) + + fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} or %s {{bold}}%s{{/}} - {{gray}}%s{{/}}", p.Name, p.Name, p.DefaultCommand.Name, p.DefaultCommand.Usage)) + if p.DefaultCommand.ShortDoc != "" { + fmt.Fprintln(writer, formatter.Fi(2, p.DefaultCommand.ShortDoc)) + } + + for _, command := range p.Commands { + fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} - {{gray}}%s{{/}}", command.Name, command.Usage)) + if command.ShortDoc != "" { + fmt.Fprintln(writer, formatter.Fi(2, command.ShortDoc)) + } + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go new file mode 100644 index 
00000000..a367a1fc --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go @@ -0,0 +1,48 @@ +package generators + +var bootstrapText = `package {{.Package}} + +import ( + "testing" + + {{.GinkgoImport}} + {{.GomegaImport}} +) + +func Test{{.FormattedName}}(t *testing.T) { + {{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail) + {{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite") +} +` + +var agoutiBootstrapText = `package {{.Package}} + +import ( + "testing" + + {{.GinkgoImport}} + {{.GomegaImport}} + "github.com/sclevine/agouti" +) + +func Test{{.FormattedName}}(t *testing.T) { + {{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail) + {{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite") +} + +var agoutiDriver *agouti.WebDriver + +var _ = {{.GinkgoPackage}}BeforeSuite(func() { + // Choose a WebDriver: + + agoutiDriver = agouti.PhantomJS() + // agoutiDriver = agouti.Selenium() + // agoutiDriver = agouti.ChromeDriver() + + {{.GomegaPackage}}Expect(agoutiDriver.Start()).To({{.GomegaPackage}}Succeed()) +}) + +var _ = {{.GinkgoPackage}}AfterSuite(func() { + {{.GomegaPackage}}Expect(agoutiDriver.Stop()).To({{.GomegaPackage}}Succeed()) +}) +` diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go new file mode 100644 index 00000000..73aff0b7 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go @@ -0,0 +1,133 @@ +package generators + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "text/template" + + sprig "github.com/go-task/slim-sprig" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildBootstrapCommand() command.Command { + conf := GeneratorsConfig{} + flags, err := types.NewGinkgoFlagSet( + types.GinkgoFlags{ + {Name: "agouti", KeyPath: "Agouti", + Usage: "If set, bootstrap will generate a bootstrap file for writing Agouti tests"}, + {Name: "nodot", KeyPath: "NoDot", + Usage: "If set, bootstrap will generate a bootstrap test file that does not dot-import ginkgo and gomega"}, + {Name: "internal", KeyPath: "Internal", + Usage: "If set, bootstrap will generate a bootstrap test file that uses the regular package name (i.e. `package X`, not `package X_test`)"}, + {Name: "template", KeyPath: "CustomTemplate", + UsageArgument: "template-file", + Usage: "If specified, generate will use the contents of the file passed as the bootstrap template"}, + {Name: "template-data", KeyPath: "CustomTemplateData", + UsageArgument: "template-data-file", + Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the bootstrap template"}, + }, + &conf, + types.GinkgoFlagSections{}, + ) + + if err != nil { + panic(err) + } + + return command.Command{ + Name: "bootstrap", + Usage: "ginkgo bootstrap", + ShortDoc: "Bootstrap a test suite for the current package", + Documentation: `Tests written in Ginkgo and Gomega require a small amount of boilerplate to hook into Go's testing infrastructure. 
+ +{{bold}}ginkgo bootstrap{{/}} generates this boilerplate for you in a file named X_suite_test.go where X is the name of the package under test.`, + DocLink: "generators", + Flags: flags, + Command: func(_ []string, _ []string) { + generateBootstrap(conf) + }, + } +} + +type bootstrapData struct { + Package string + FormattedName string + + GinkgoImport string + GomegaImport string + GinkgoPackage string + GomegaPackage string + CustomData map[string]any +} + +func generateBootstrap(conf GeneratorsConfig) { + packageName, bootstrapFilePrefix, formattedName := getPackageAndFormattedName() + + data := bootstrapData{ + Package: determinePackageName(packageName, conf.Internal), + FormattedName: formattedName, + + GinkgoImport: `. "github.com/onsi/ginkgo/v2"`, + GomegaImport: `. "github.com/onsi/gomega"`, + GinkgoPackage: "", + GomegaPackage: "", + } + + if conf.NoDot { + data.GinkgoImport = `"github.com/onsi/ginkgo/v2"` + data.GomegaImport = `"github.com/onsi/gomega"` + data.GinkgoPackage = `ginkgo.` + data.GomegaPackage = `gomega.` + } + + targetFile := fmt.Sprintf("%s_suite_test.go", bootstrapFilePrefix) + if internal.FileExists(targetFile) { + command.AbortWith("{{bold}}%s{{/}} already exists", targetFile) + } else { + fmt.Printf("Generating ginkgo test suite bootstrap for %s in:\n\t%s\n", packageName, targetFile) + } + + f, err := os.Create(targetFile) + command.AbortIfError("Failed to create file:", err) + defer f.Close() + + var templateText string + if conf.CustomTemplate != "" { + tpl, err := os.ReadFile(conf.CustomTemplate) + command.AbortIfError("Failed to read custom bootstrap file:", err) + templateText = string(tpl) + if conf.CustomTemplateData != "" { + var tplCustomDataMap map[string]any + tplCustomData, err := os.ReadFile(conf.CustomTemplateData) + command.AbortIfError("Failed to read custom boostrap data file:", err) + if !json.Valid([]byte(tplCustomData)) { + command.AbortWith("Invalid JSON object in custom data file.") + } + //create map from the custom template data + json.Unmarshal(tplCustomData, &tplCustomDataMap) + data.CustomData = tplCustomDataMap + } + } else if conf.Agouti { + templateText = agoutiBootstrapText + } else { + templateText = bootstrapText + } + + //Setting the option to explicitly fail if template is rendered trying to access missing key + bootstrapTemplate, err := template.New("bootstrap").Funcs(sprig.TxtFuncMap()).Option("missingkey=error").Parse(templateText) + command.AbortIfError("Failed to parse bootstrap template:", err) + + buf := &bytes.Buffer{} + //Being explicit about failing sooner during template rendering + //when accessing custom data rather than during the go fmt command + err = bootstrapTemplate.Execute(buf, data) + command.AbortIfError("Failed to render bootstrap template:", err) + + buf.WriteTo(f) + + internal.GoFmt(targetFile) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go new file mode 100644 index 00000000..48d23f91 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go @@ -0,0 +1,259 @@ +package generators + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "text/template" + + sprig "github.com/go-task/slim-sprig" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildGenerateCommand() command.Command { + conf := GeneratorsConfig{} + flags, err := 
types.NewGinkgoFlagSet( + types.GinkgoFlags{ + {Name: "agouti", KeyPath: "Agouti", + Usage: "If set, generate will create a test file for writing Agouti tests"}, + {Name: "nodot", KeyPath: "NoDot", + Usage: "If set, generate will create a test file that does not dot-import ginkgo and gomega"}, + {Name: "internal", KeyPath: "Internal", + Usage: "If set, generate will create a test file that uses the regular package name (i.e. `package X`, not `package X_test`)"}, + {Name: "template", KeyPath: "CustomTemplate", + UsageArgument: "template-file", + Usage: "If specified, generate will use the contents of the file passed as the test file template"}, + {Name: "template-data", KeyPath: "CustomTemplateData", + UsageArgument: "template-data-file", + Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the test file template"}, + }, + &conf, + types.GinkgoFlagSections{}, + ) + + if err != nil { + panic(err) + } + + return command.Command{ + Name: "generate", + Usage: "ginkgo generate <filename(s)>", + ShortDoc: "Generate a test file named <filename>_test.go", + Documentation: `If the optional <filename> argument is omitted, a file named after the package in the current directory will be created. + +You can pass multiple <filename(s)> to generate multiple files simultaneously. The resulting files are named <filename>_test.go. + +You can also pass a <filename> of the form "file.go" and generate will emit "file_test.go".`, + DocLink: "generators", + Flags: flags, + Command: func(args []string, _ []string) { + generateTestFiles(conf, args) + }, + } +} + +type specData struct { + Package string + Subject string + PackageImportPath string + ImportPackage bool + + GinkgoImport string + GomegaImport string + GinkgoPackage string + GomegaPackage string + CustomData map[string]any +} + +func generateTestFiles(conf GeneratorsConfig, args []string) { + subjects := args + if len(subjects) == 0 { + subjects = []string{""} + } + for _, subject := range subjects { + generateTestFileForSubject(subject, conf) + } +} + +func generateTestFileForSubject(subject string, conf GeneratorsConfig) { + packageName, specFilePrefix, formattedName := getPackageAndFormattedName() + if subject != "" { + specFilePrefix = formatSubject(subject) + formattedName = prettifyName(specFilePrefix) + } + + if conf.Internal { + specFilePrefix = specFilePrefix + "_internal" + } + + data := specData{ + Package: determinePackageName(packageName, conf.Internal), + Subject: formattedName, + PackageImportPath: getPackageImportPath(), + ImportPackage: !conf.Internal, + + GinkgoImport: `. "github.com/onsi/ginkgo/v2"`, + GomegaImport: `. 
"github.com/onsi/gomega"`, + GinkgoPackage: "", + GomegaPackage: "", + } + + if conf.NoDot { + data.GinkgoImport = `"github.com/onsi/ginkgo/v2"` + data.GomegaImport = `"github.com/onsi/gomega"` + data.GinkgoPackage = `ginkgo.` + data.GomegaPackage = `gomega.` + } + + targetFile := fmt.Sprintf("%s_test.go", specFilePrefix) + if internal.FileExists(targetFile) { + command.AbortWith("{{bold}}%s{{/}} already exists", targetFile) + } else { + fmt.Printf("Generating ginkgo test for %s in:\n %s\n", data.Subject, targetFile) + } + + f, err := os.Create(targetFile) + command.AbortIfError("Failed to create test file:", err) + defer f.Close() + + var templateText string + if conf.CustomTemplate != "" { + tpl, err := os.ReadFile(conf.CustomTemplate) + command.AbortIfError("Failed to read custom template file:", err) + templateText = string(tpl) + if conf.CustomTemplateData != "" { + var tplCustomDataMap map[string]any + tplCustomData, err := os.ReadFile(conf.CustomTemplateData) + command.AbortIfError("Failed to read custom template data file:", err) + if !json.Valid([]byte(tplCustomData)) { + command.AbortWith("Invalid JSON object in custom data file.") + } + //create map from the custom template data + json.Unmarshal(tplCustomData, &tplCustomDataMap) + data.CustomData = tplCustomDataMap + } + } else if conf.Agouti { + templateText = agoutiSpecText + } else { + templateText = specText + } + + //Setting the option to explicitly fail if template is rendered trying to access missing key + specTemplate, err := template.New("spec").Funcs(sprig.TxtFuncMap()).Option("missingkey=error").Parse(templateText) + command.AbortIfError("Failed to read parse test template:", err) + + //Being explicit about failing sooner during template rendering + //when accessing custom data rather than during the go fmt command + err = specTemplate.Execute(f, data) + command.AbortIfError("Failed to render bootstrap template:", err) + internal.GoFmt(targetFile) +} + +func formatSubject(name string) string { + name = strings.ReplaceAll(name, "-", "_") + name = strings.ReplaceAll(name, " ", "_") + name = strings.Split(name, ".go")[0] + name = strings.Split(name, "_test")[0] + return name +} + +// moduleName returns module name from go.mod from given module root directory +func moduleName(modRoot string) string { + modFile, err := os.Open(filepath.Join(modRoot, "go.mod")) + if err != nil { + return "" + } + + mod := make([]byte, 128) + _, err = modFile.Read(mod) + if err != nil { + return "" + } + + slashSlash := []byte("//") + moduleStr := []byte("module") + + for len(mod) > 0 { + line := mod + mod = nil + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, mod = line[:i], line[i+1:] + } + if i := bytes.Index(line, slashSlash); i >= 0 { + line = line[:i] + } + line = bytes.TrimSpace(line) + if !bytes.HasPrefix(line, moduleStr) { + continue + } + line = line[len(moduleStr):] + n := len(line) + line = bytes.TrimSpace(line) + if len(line) == n || len(line) == 0 { + continue + } + + if line[0] == '"' || line[0] == '`' { + p, err := strconv.Unquote(string(line)) + if err != nil { + return "" // malformed quoted string or multiline module path + } + return p + } + + return string(line) + } + + return "" // missing module path +} + +func findModuleRoot(dir string) (root string) { + dir = filepath.Clean(dir) + + // Look for enclosing go.mod. 
+ for { + if fi, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() { + return dir + } + d := filepath.Dir(dir) + if d == dir { + break + } + dir = d + } + return "" +} + +func getPackageImportPath() string { + workingDir, err := os.Getwd() + if err != nil { + panic(err.Error()) + } + + sep := string(filepath.Separator) + + // Try go.mod file first + modRoot := findModuleRoot(workingDir) + if modRoot != "" { + modName := moduleName(modRoot) + if modName != "" { + cd := strings.ReplaceAll(workingDir, modRoot, "") + cd = strings.ReplaceAll(cd, sep, "/") + return modName + cd + } + } + + // Fallback to GOPATH structure + paths := strings.Split(workingDir, sep+"src"+sep) + if len(paths) == 1 { + fmt.Printf("\nCouldn't identify package import path.\n\n\tginkgo generate\n\nMust be run within a package directory under $GOPATH/src/...\nYou're going to have to change UNKNOWN_PACKAGE_PATH in the generated file...\n\n") + return "UNKNOWN_PACKAGE_PATH" + } + return filepath.ToSlash(paths[len(paths)-1]) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go new file mode 100644 index 00000000..c3470adb --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go @@ -0,0 +1,41 @@ +package generators + +var specText = `package {{.Package}} + +import ( + {{.GinkgoImport}} + {{.GomegaImport}} + + {{if .ImportPackage}}"{{.PackageImportPath}}"{{end}} +) + +var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() { + +}) +` + +var agoutiSpecText = `package {{.Package}} + +import ( + {{.GinkgoImport}} + {{.GomegaImport}} + "github.com/sclevine/agouti" + . "github.com/sclevine/agouti/matchers" + + {{if .ImportPackage}}"{{.PackageImportPath}}"{{end}} +) + +var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() { + var page *agouti.Page + + {{.GinkgoPackage}}BeforeEach(func() { + var err error + page, err = agoutiDriver.NewPage() + {{.GomegaPackage}}Expect(err).NotTo({{.GomegaPackage}}HaveOccurred()) + }) + + {{.GinkgoPackage}}AfterEach(func() { + {{.GomegaPackage}}Expect(page.Destroy()).To({{.GomegaPackage}}Succeed()) + }) +}) +` diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go new file mode 100644 index 00000000..3046a448 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go @@ -0,0 +1,64 @@ +package generators + +import ( + "go/build" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/onsi/ginkgo/v2/ginkgo/command" +) + +type GeneratorsConfig struct { + Agouti, NoDot, Internal bool + CustomTemplate string + CustomTemplateData string +} + +func getPackageAndFormattedName() (string, string, string) { + path, err := os.Getwd() + command.AbortIfError("Could not get current working directory:", err) + + dirName := strings.ReplaceAll(filepath.Base(path), "-", "_") + dirName = strings.ReplaceAll(dirName, " ", "_") + + pkg, err := build.ImportDir(path, 0) + packageName := pkg.Name + if err != nil { + packageName = ensureLegalPackageName(dirName) + } + + formattedName := prettifyName(filepath.Base(path)) + return packageName, dirName, formattedName +} + +func ensureLegalPackageName(name string) string { + if name == "_" { + return "underscore" + } + if len(name) == 0 { + return "empty" + } + n, isDigitErr := strconv.Atoi(string(name[0])) + if isDigitErr == nil { + return []string{"zero", "one", 
"two", "three", "four", "five", "six", "seven", "eight", "nine"}[n] + name[1:] + } + return name +} + +func prettifyName(name string) string { + name = strings.ReplaceAll(name, "-", " ") + name = strings.ReplaceAll(name, "_", " ") + name = strings.Title(name) + name = strings.ReplaceAll(name, " ", "") + return name +} + +func determinePackageName(name string, internal bool) string { + if internal { + return name + } + + return name + "_test" +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go new file mode 100644 index 00000000..496ec4a2 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go @@ -0,0 +1,152 @@ +package internal + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + + "github.com/onsi/ginkgo/v2/types" +) + +func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite { + if suite.PathToCompiledTest != "" { + return suite + } + + suite.CompilationError = nil + + path, err := filepath.Abs(filepath.Join(suite.Path, suite.PackageName+".test")) + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compute compilation target path:\n%s", err.Error()) + return suite + } + + args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, path, "./") + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error()) + return suite + } + + cmd := exec.Command("go", args...) + cmd.Dir = suite.Path + output, err := cmd.CombinedOutput() + if err != nil { + if len(output) > 0 { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compile %s:\n\n%s", suite.PackageName, output) + } else { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compile %s\n%s", suite.PackageName, err.Error()) + } + return suite + } + + if strings.Contains(string(output), "[no test files]") { + suite.State = TestSuiteStateSkippedDueToEmptyCompilation + return suite + } + + if len(output) > 0 { + fmt.Println(string(output)) + } + + if !FileExists(path) { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compile %s:\nOutput file %s could not be found", suite.PackageName, path) + return suite + } + + suite.State = TestSuiteStateCompiled + suite.PathToCompiledTest = path + return suite +} + +func Cleanup(goFlagsConfig types.GoFlagsConfig, suites ...TestSuite) { + if goFlagsConfig.BinaryMustBePreserved() { + return + } + for _, suite := range suites { + if !suite.Precompiled { + os.Remove(suite.PathToCompiledTest) + } + } +} + +type parallelSuiteBundle struct { + suite TestSuite + compiled chan TestSuite +} + +type OrderedParallelCompiler struct { + mutex *sync.Mutex + stopped bool + numCompilers int + + idx int + numSuites int + completionChannels []chan TestSuite +} + +func NewOrderedParallelCompiler(numCompilers int) *OrderedParallelCompiler { + return &OrderedParallelCompiler{ + mutex: &sync.Mutex{}, + numCompilers: numCompilers, + } +} + +func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsConfig types.GoFlagsConfig) { + opc.stopped = false + opc.idx = 0 + opc.numSuites = len(suites) + opc.completionChannels = make([]chan TestSuite, opc.numSuites) + + toCompile := make(chan parallelSuiteBundle, opc.numCompilers) + for compiler := 0; compiler < opc.numCompilers; 
compiler++ { + go func() { + for bundle := range toCompile { + c, suite := bundle.compiled, bundle.suite + opc.mutex.Lock() + stopped := opc.stopped + opc.mutex.Unlock() + if !stopped { + suite = CompileSuite(suite, goFlagsConfig) + } + c <- suite + } + }() + } + + for idx, suite := range suites { + opc.completionChannels[idx] = make(chan TestSuite, 1) + toCompile <- parallelSuiteBundle{suite, opc.completionChannels[idx]} + if idx == 0 { //compile first suite serially + suite = <-opc.completionChannels[0] + opc.completionChannels[0] <- suite + } + } + + close(toCompile) +} + +func (opc *OrderedParallelCompiler) Next() (int, TestSuite) { + if opc.idx >= opc.numSuites { + return opc.numSuites, TestSuite{} + } + + idx := opc.idx + suite := <-opc.completionChannels[idx] + opc.idx = opc.idx + 1 + + return idx, suite +} + +func (opc *OrderedParallelCompiler) StopAndDrain() { + opc.mutex.Lock() + opc.stopped = true + opc.mutex.Unlock() +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go new file mode 100644 index 00000000..bd3c6d02 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go @@ -0,0 +1,237 @@ +package internal + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "path/filepath" + "regexp" + "strconv" + + "github.com/google/pprof/profile" + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string { + suffix := "" + if process != 0 { + suffix = fmt.Sprintf(".%d", process) + } + if cliConfig.OutputDir == "" { + return filepath.Join(suite.AbsPath(), assetName+suffix) + } + outputDir, _ := filepath.Abs(cliConfig.OutputDir) + return filepath.Join(outputDir, suite.NamespacedName()+"_"+assetName+suffix) +} + +func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIConfig, suiteConfig types.SuiteConfig, reporterConfig types.ReporterConfig, goFlagsConfig types.GoFlagsConfig) ([]string, error) { + messages := []string{} + suitesWithProfiles := suites.WithState(TestSuiteStatePassed, TestSuiteStateFailed) //anything else won't have actually run and generated a profile + + // merge cover profiles if need be + if goFlagsConfig.Cover && !cliConfig.KeepSeparateCoverprofiles { + coverProfiles := []string{} + for _, suite := range suitesWithProfiles { + if !suite.HasProgrammaticFocus { + coverProfiles = append(coverProfiles, AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0)) + } + } + + if len(coverProfiles) > 0 { + dst := goFlagsConfig.CoverProfile + if cliConfig.OutputDir != "" { + dst = filepath.Join(cliConfig.OutputDir, goFlagsConfig.CoverProfile) + } + err := MergeAndCleanupCoverProfiles(coverProfiles, dst) + if err != nil { + return messages, err + } + coverage, err := GetCoverageFromCoverProfile(dst) + if err != nil { + return messages, err + } + if coverage == 0 { + messages = append(messages, "composite coverage: [no statements]") + } else if suitesWithProfiles.AnyHaveProgrammaticFocus() { + messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements however some suites did not contribute because they included programatically focused specs", coverage)) + } else { + messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements", coverage)) + } + } else { + messages = append(messages, "no composite coverage computed: all suites 
included programatically focused specs") + } + } + + // copy binaries if need be + for _, suite := range suitesWithProfiles { + if goFlagsConfig.BinaryMustBePreserved() && cliConfig.OutputDir != "" { + src := suite.PathToCompiledTest + dst := filepath.Join(cliConfig.OutputDir, suite.NamespacedName()+".test") + if suite.Precompiled { + if err := CopyFile(src, dst); err != nil { + return messages, err + } + } else { + if err := os.Rename(src, dst); err != nil { + return messages, err + } + } + } + } + + type reportFormat struct { + ReportName string + GenerateFunc func(types.Report, string) error + MergeFunc func([]string, string) ([]string, error) + } + reportFormats := []reportFormat{} + if reporterConfig.JSONReport != "" { + reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JSONReport, GenerateFunc: reporters.GenerateJSONReport, MergeFunc: reporters.MergeAndCleanupJSONReports}) + } + if reporterConfig.JUnitReport != "" { + reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JUnitReport, GenerateFunc: reporters.GenerateJUnitReport, MergeFunc: reporters.MergeAndCleanupJUnitReports}) + } + if reporterConfig.TeamcityReport != "" { + reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.TeamcityReport, GenerateFunc: reporters.GenerateTeamcityReport, MergeFunc: reporters.MergeAndCleanupTeamcityReports}) + } + + // Generate reports for suites that failed to run + reportableSuites := suites.ThatAreGinkgoSuites() + for _, suite := range reportableSuites.WithState(TestSuiteStateFailedToCompile, TestSuiteStateFailedDueToTimeout, TestSuiteStateSkippedDueToPriorFailures, TestSuiteStateSkippedDueToEmptyCompilation) { + report := types.Report{ + SuitePath: suite.AbsPath(), + SuiteConfig: suiteConfig, + SuiteSucceeded: false, + } + switch suite.State { + case TestSuiteStateFailedToCompile: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, suite.CompilationError.Error()) + case TestSuiteStateFailedDueToTimeout: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, TIMEOUT_ELAPSED_FAILURE_REASON) + case TestSuiteStateSkippedDueToPriorFailures: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, PRIOR_FAILURES_FAILURE_REASON) + case TestSuiteStateSkippedDueToEmptyCompilation: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, EMPTY_SKIP_FAILURE_REASON) + report.SuiteSucceeded = true + } + + for _, format := range reportFormats { + format.GenerateFunc(report, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0)) + } + } + + // Merge reports unless we've been asked to keep them separate + if !cliConfig.KeepSeparateReports { + for _, format := range reportFormats { + reports := []string{} + for _, suite := range reportableSuites { + reports = append(reports, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0)) + } + dst := format.ReportName + if cliConfig.OutputDir != "" { + dst = filepath.Join(cliConfig.OutputDir, format.ReportName) + } + mergeMessages, err := format.MergeFunc(reports, dst) + messages = append(messages, mergeMessages...) 
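The `reportFormat` table above is the extensible piece of this function: each output format contributes a file name plus generate/merge functions, and the surrounding loops stay format-agnostic. A minimal standalone version of that pattern, where the `report` struct and both writers are invented stand-ins for `types.Report` and the real `reporters` functions:

```
package main

import (
	"fmt"
	"os"
)

// report stands in for types.Report, which carries the full suite results.
type report struct {
	Suite  string
	Passed bool
}

// reportFormat mirrors the table-driven dispatch above: each output format
// contributes a file name and a generator, so the loop that emits reports
// never needs to know which formats are enabled.
type reportFormat struct {
	Name     string
	Generate func(report, string) error
}

func main() {
	formats := []reportFormat{
		{Name: "report.json", Generate: func(r report, dst string) error {
			return os.WriteFile(dst, []byte(fmt.Sprintf("{\"suite\":%q,\"passed\":%v}\n", r.Suite, r.Passed)), 0o644)
		}},
		{Name: "report.txt", Generate: func(r report, dst string) error {
			status := "FAIL"
			if r.Passed {
				status = "PASS"
			}
			return os.WriteFile(dst, []byte(r.Suite+": "+status+"\n"), 0o644)
		}},
	}

	r := report{Suite: "internal", Passed: true}
	for _, f := range formats {
		if err := f.Generate(r, f.Name); err != nil {
			fmt.Fprintln(os.Stderr, err)
		}
	}
}
```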
+ if err != nil { + return messages, err + } + } + } + + return messages, nil +} + +//loads each profile, combines them, deletes them, stores them in destination +func MergeAndCleanupCoverProfiles(profiles []string, destination string) error { + combined := &bytes.Buffer{} + modeRegex := regexp.MustCompile(`^mode: .*\n`) + for i, profile := range profiles { + contents, err := os.ReadFile(profile) + if err != nil { + return fmt.Errorf("Unable to read coverage file %s:\n%s", profile, err.Error()) + } + os.Remove(profile) + + // remove the cover mode line from every file + // except the first one + if i > 0 { + contents = modeRegex.ReplaceAll(contents, []byte{}) + } + + _, err = combined.Write(contents) + + // Add a newline to the end of every file if missing. + if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' { + _, err = combined.Write([]byte("\n")) + } + + if err != nil { + return fmt.Errorf("Unable to append to coverprofile:\n%s", err.Error()) + } + } + + err := os.WriteFile(destination, combined.Bytes(), 0666) + if err != nil { + return fmt.Errorf("Unable to create combined cover profile:\n%s", err.Error()) + } + return nil +} + +func GetCoverageFromCoverProfile(profile string) (float64, error) { + cmd := exec.Command("go", "tool", "cover", "-func", profile) + output, err := cmd.CombinedOutput() + if err != nil { + return 0, fmt.Errorf("Could not process Coverprofile %s: %s", profile, err.Error()) + } + re := regexp.MustCompile(`total:\s*\(statements\)\s*(\d*\.\d*)\%`) + matches := re.FindStringSubmatch(string(output)) + if matches == nil { + return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage") + } + coverageString := matches[1] + coverage, err := strconv.ParseFloat(coverageString, 64) + if err != nil { + return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage: %s", err.Error()) + } + + return coverage, nil +} + +func MergeProfiles(profilePaths []string, destination string) error { + profiles := []*profile.Profile{} + for _, profilePath := range profilePaths { + proFile, err := os.Open(profilePath) + if err != nil { + return fmt.Errorf("Could not open profile: %s\n%s", profilePath, err.Error()) + } + prof, err := profile.Parse(proFile) + if err != nil { + return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error()) + } + profiles = append(profiles, prof) + os.Remove(profilePath) + } + + mergedProfile, err := profile.Merge(profiles) + if err != nil { + return fmt.Errorf("Could not merge profiles:\n%s", err.Error()) + } + + outFile, err := os.Create(destination) + if err != nil { + return fmt.Errorf("Could not create merged profile %s:\n%s", destination, err.Error()) + } + err = mergedProfile.Write(outFile) + if err != nil { + return fmt.Errorf("Could not write merged profile %s:\n%s", destination, err.Error()) + } + err = outFile.Close() + if err != nil { + return fmt.Errorf("Could not close merged profile %s:\n%s", destination, err.Error()) + } + + return nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go new file mode 100644 index 00000000..41052ea1 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go @@ -0,0 +1,355 @@ +package internal + +import ( + "bytes" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "syscall" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" + 
"github.com/onsi/ginkgo/v2/internal/parallel_support" + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +func RunCompiledSuite(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite { + suite.State = TestSuiteStateFailed + suite.HasProgrammaticFocus = false + + if suite.PathToCompiledTest == "" { + return suite + } + + if suite.IsGinkgo && cliConfig.ComputedProcs() > 1 { + suite = runParallel(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs) + } else if suite.IsGinkgo { + suite = runSerial(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs) + } else { + suite = runGoTest(suite, cliConfig, goFlagsConfig) + } + runAfterRunHook(cliConfig.AfterRunHook, reporterConfig.NoColor, suite) + return suite +} + +func buildAndStartCommand(suite TestSuite, args []string, pipeToStdout bool) (*exec.Cmd, *bytes.Buffer) { + buf := &bytes.Buffer{} + cmd := exec.Command(suite.PathToCompiledTest, args...) + cmd.Dir = suite.Path + if pipeToStdout { + cmd.Stderr = io.MultiWriter(os.Stdout, buf) + cmd.Stdout = os.Stdout + } else { + cmd.Stderr = buf + cmd.Stdout = buf + } + err := cmd.Start() + command.AbortIfError("Failed to start test suite", err) + + return cmd, buf +} + +func checkForNoTestsWarning(buf *bytes.Buffer) bool { + if strings.Contains(buf.String(), "warning: no tests to run") { + fmt.Fprintf(os.Stderr, `Found no test suites, did you forget to run "ginkgo bootstrap"?`) + return true + } + return false +} + +func runGoTest(suite TestSuite, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig) TestSuite { + // As we run the go test from the suite directory, make sure the cover profile is absolute + // and placed into the expected output directory when one is configured. 
+ if goFlagsConfig.Cover && !filepath.IsAbs(goFlagsConfig.CoverProfile) { + goFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0) + } + + args, err := types.GenerateGoTestRunArgs(goFlagsConfig) + command.AbortIfError("Failed to generate test run arguments", err) + cmd, buf := buildAndStartCommand(suite, args, true) + + cmd.Wait() + + exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) + passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed + if passed { + suite.State = TestSuiteStatePassed + } else { + suite.State = TestSuiteStateFailed + } + + return suite +} + +func runSerial(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite { + if goFlagsConfig.Cover { + goFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0) + } + if goFlagsConfig.BlockProfile != "" { + goFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0) + } + if goFlagsConfig.CPUProfile != "" { + goFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0) + } + if goFlagsConfig.MemProfile != "" { + goFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0) + } + if goFlagsConfig.MutexProfile != "" { + goFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0) + } + if reporterConfig.JSONReport != "" { + reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0) + } + if reporterConfig.JUnitReport != "" { + reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0) + } + if reporterConfig.TeamcityReport != "" { + reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0) + } + + args, err := types.GenerateGinkgoTestRunArgs(ginkgoConfig, reporterConfig, goFlagsConfig) + command.AbortIfError("Failed to generate test run arguments", err) + args = append([]string{"--test.timeout=0"}, args...) + args = append(args, additionalArgs...) 
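Both runners fold the child's exit status into a pass/fail decision, treating Ginkgo's reserved programmatic-focus exit code as a pass. A sketch of that interpretation step using the portable `*exec.ExitError` API instead of the `syscall.WaitStatus` cast above; the constant 197 is assumed here to match `types.GINKGO_FOCUS_EXIT_CODE`:

```
package main

import (
	"errors"
	"fmt"
	"os/exec"
)

// focusExitCode is assumed to match types.GINKGO_FOCUS_EXIT_CODE: the
// suite passed, but specs were programmatically focused.
const focusExitCode = 197

func main() {
	err := exec.Command("go", "vet", "./...").Run()

	exitStatus := 0
	var exitErr *exec.ExitError
	if errors.As(err, &exitErr) {
		exitStatus = exitErr.ExitCode()
	} else if err != nil {
		fmt.Println("failed to run:", err)
		return
	}

	// Mirror the runners' rule: a focus exit code still counts as passing.
	passed := exitStatus == 0 || exitStatus == focusExitCode
	fmt.Printf("exit status %d, passed=%v\n", exitStatus, passed)
}
```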
+ + cmd, buf := buildAndStartCommand(suite, args, true) + + cmd.Wait() + + exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + suite.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) + passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) + passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed + if passed { + suite.State = TestSuiteStatePassed + } else { + suite.State = TestSuiteStateFailed + } + + if suite.HasProgrammaticFocus { + if goFlagsConfig.Cover { + fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused") + } + if goFlagsConfig.BlockProfile != "" { + fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused") + } + if goFlagsConfig.CPUProfile != "" { + fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused") + } + if goFlagsConfig.MemProfile != "" { + fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused") + } + if goFlagsConfig.MutexProfile != "" { + fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused") + } + } + + return suite +} + +func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite { + type procResult struct { + passed bool + hasProgrammaticFocus bool + } + + numProcs := cliConfig.ComputedProcs() + procOutput := make([]*bytes.Buffer, numProcs) + coverProfiles := []string{} + + blockProfiles := []string{} + cpuProfiles := []string{} + memProfiles := []string{} + mutexProfiles := []string{} + + procResults := make(chan procResult) + + server, err := parallel_support.NewServer(numProcs, reporters.NewDefaultReporter(reporterConfig, formatter.ColorableStdOut)) + command.AbortIfError("Failed to start parallel spec server", err) + server.Start() + defer server.Close() + + if reporterConfig.JSONReport != "" { + reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0) + } + if reporterConfig.JUnitReport != "" { + reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0) + } + if reporterConfig.TeamcityReport != "" { + reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0) + } + + for proc := 1; proc <= numProcs; proc++ { + procGinkgoConfig := ginkgoConfig + procGinkgoConfig.ParallelProcess, procGinkgoConfig.ParallelTotal, procGinkgoConfig.ParallelHost = proc, numProcs, server.Address() + + procGoFlagsConfig := goFlagsConfig + if goFlagsConfig.Cover { + procGoFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, proc) + coverProfiles = append(coverProfiles, procGoFlagsConfig.CoverProfile) + } + if goFlagsConfig.BlockProfile != "" { + procGoFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, proc) + blockProfiles = append(blockProfiles, procGoFlagsConfig.BlockProfile) + } + if goFlagsConfig.CPUProfile != "" { + procGoFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, proc) + cpuProfiles = append(cpuProfiles, procGoFlagsConfig.CPUProfile) + } + if goFlagsConfig.MemProfile != "" { + procGoFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, 
cliConfig, proc) + memProfiles = append(memProfiles, procGoFlagsConfig.MemProfile) + } + if goFlagsConfig.MutexProfile != "" { + procGoFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, proc) + mutexProfiles = append(mutexProfiles, procGoFlagsConfig.MutexProfile) + } + + args, err := types.GenerateGinkgoTestRunArgs(procGinkgoConfig, reporterConfig, procGoFlagsConfig) + command.AbortIfError("Failed to generate test run arguments", err) + args = append([]string{"--test.timeout=0"}, args...) + args = append(args, additionalArgs...) + + cmd, buf := buildAndStartCommand(suite, args, false) + procOutput[proc-1] = buf + server.RegisterAlive(proc, func() bool { return cmd.ProcessState == nil || !cmd.ProcessState.Exited() }) + + go func() { + cmd.Wait() + exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + procResults <- procResult{ + passed: (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE), + hasProgrammaticFocus: exitStatus == types.GINKGO_FOCUS_EXIT_CODE, + } + }() + } + + passed := true + for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ { + result := <-procResults + passed = passed && result.passed + suite.HasProgrammaticFocus = suite.HasProgrammaticFocus || result.hasProgrammaticFocus + } + if passed { + suite.State = TestSuiteStatePassed + } else { + suite.State = TestSuiteStateFailed + } + + select { + case <-server.GetSuiteDone(): + fmt.Println("") + case <-time.After(time.Second): + //one of the nodes never finished reporting to the server. Something must have gone wrong. + fmt.Fprint(formatter.ColorableStdErr, formatter.F("\n{{bold}}{{red}}Ginkgo timed out waiting for all parallel procs to report back{{/}}\n")) + fmt.Fprint(formatter.ColorableStdErr, formatter.F("{{gray}}Test suite:{{/}} %s (%s)\n\n", suite.PackageName, suite.Path)) + fmt.Fprint(formatter.ColorableStdErr, formatter.Fiw(0, formatter.COLS, "This occurs if a parallel process exits before it reports its results to the Ginkgo CLI. The CLI will now print out all the stdout/stderr output it's collected from the running processes. 
However you may not see anything useful in these logs because the individual test processes usually intercept output to stdout/stderr in order to capture it in the spec reports.\n\nYou may want to try rerunning your test suite with {{light-gray}}--output-interceptor-mode=none{{/}} to see additional output here and debug your suite.\n")) + fmt.Fprintln(formatter.ColorableStdErr, " ") + for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ { + fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{bold}}Output from proc %d:{{/}}\n", proc)) + fmt.Fprintln(os.Stderr, formatter.Fi(1, "%s", procOutput[proc-1].String())) + } + fmt.Fprintf(os.Stderr, "** End **") + } + + for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ { + output := procOutput[proc-1].String() + if proc == 1 && checkForNoTestsWarning(procOutput[0]) && cliConfig.RequireSuite { + suite.State = TestSuiteStateFailed + } + if strings.Contains(output, "deprecated Ginkgo functionality") { + fmt.Fprintln(os.Stderr, output) + } + } + + if len(coverProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused") + } else { + coverProfile := AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0) + err := MergeAndCleanupCoverProfiles(coverProfiles, coverProfile) + command.AbortIfError("Failed to combine cover profiles", err) + + coverage, err := GetCoverageFromCoverProfile(coverProfile) + command.AbortIfError("Failed to compute coverage", err) + if coverage == 0 { + fmt.Fprintln(os.Stdout, "coverage: [no statements]") + } else { + fmt.Fprintf(os.Stdout, "coverage: %.1f%% of statements\n", coverage) + } + } + } + if len(blockProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused") + } else { + blockProfile := AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0) + err := MergeProfiles(blockProfiles, blockProfile) + command.AbortIfError("Failed to combine blockprofiles", err) + } + } + if len(cpuProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused") + } else { + cpuProfile := AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0) + err := MergeProfiles(cpuProfiles, cpuProfile) + command.AbortIfError("Failed to combine cpuprofiles", err) + } + } + if len(memProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused") + } else { + memProfile := AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0) + err := MergeProfiles(memProfiles, memProfile) + command.AbortIfError("Failed to combine memprofiles", err) + } + } + if len(mutexProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused") + } else { + mutexProfile := AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0) + err := MergeProfiles(mutexProfiles, mutexProfile) + command.AbortIfError("Failed to combine mutexprofiles", err) + } + } + + return suite +} + +func runAfterRunHook(command string, noColor bool, suite TestSuite) { + if command == "" { + return + } + f := formatter.NewWithNoColorBool(noColor) + + // Allow for string replacement to pass input to the command + passed := "[FAIL]" + if suite.State.Is(TestSuiteStatePassed) { + 
passed = "[PASS]" + } + command = strings.ReplaceAll(command, "(ginkgo-suite-passed)", passed) + command = strings.ReplaceAll(command, "(ginkgo-suite-name)", suite.PackageName) + + // Must break command into parts + splitArgs := regexp.MustCompile(`'.+'|".+"|\S+`) + parts := splitArgs.FindAllString(command, -1) + + output, err := exec.Command(parts[0], parts[1:]...).CombinedOutput() + if err != nil { + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{red}}{{bold}}After-run-hook failed:{{/}}")) + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{red}}%s{{/}}", output)) + } else { + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{green}}{{bold}}After-run-hook succeeded:{{/}}")) + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{green}}%s{{/}}", output)) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go new file mode 100644 index 00000000..64dcb1b7 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go @@ -0,0 +1,283 @@ +package internal + +import ( + "errors" + "math/rand" + "os" + "path" + "path/filepath" + "regexp" + "strings" + + "github.com/onsi/ginkgo/v2/types" +) + +const TIMEOUT_ELAPSED_FAILURE_REASON = "Suite did not run because the timeout elapsed" +const PRIOR_FAILURES_FAILURE_REASON = "Suite did not run because prior suites failed and --keep-going is not set" +const EMPTY_SKIP_FAILURE_REASON = "Suite did not run go test reported that no test files were found" + +type TestSuiteState uint + +const ( + TestSuiteStateInvalid TestSuiteState = iota + + TestSuiteStateUncompiled + TestSuiteStateCompiled + + TestSuiteStatePassed + + TestSuiteStateSkippedDueToEmptyCompilation + TestSuiteStateSkippedByFilter + TestSuiteStateSkippedDueToPriorFailures + + TestSuiteStateFailed + TestSuiteStateFailedDueToTimeout + TestSuiteStateFailedToCompile +) + +var TestSuiteStateFailureStates = []TestSuiteState{TestSuiteStateFailed, TestSuiteStateFailedDueToTimeout, TestSuiteStateFailedToCompile} + +func (state TestSuiteState) Is(states ...TestSuiteState) bool { + for _, suiteState := range states { + if suiteState == state { + return true + } + } + + return false +} + +type TestSuite struct { + Path string + PackageName string + IsGinkgo bool + + Precompiled bool + PathToCompiledTest string + CompilationError error + + HasProgrammaticFocus bool + State TestSuiteState +} + +func (ts TestSuite) AbsPath() string { + path, _ := filepath.Abs(ts.Path) + return path +} + +func (ts TestSuite) NamespacedName() string { + name := relPath(ts.Path) + name = strings.TrimLeft(name, "."+string(filepath.Separator)) + name = strings.ReplaceAll(name, string(filepath.Separator), "_") + name = strings.ReplaceAll(name, " ", "_") + if name == "" { + return ts.PackageName + } + return name +} + +type TestSuites []TestSuite + +func (ts TestSuites) AnyHaveProgrammaticFocus() bool { + for _, suite := range ts { + if suite.HasProgrammaticFocus { + return true + } + } + + return false +} + +func (ts TestSuites) ThatAreGinkgoSuites() TestSuites { + out := TestSuites{} + for _, suite := range ts { + if suite.IsGinkgo { + out = append(out, suite) + } + } + return out +} + +func (ts TestSuites) CountWithState(states ...TestSuiteState) int { + n := 0 + for _, suite := range ts { + if suite.State.Is(states...) { + n += 1 + } + } + + return n +} + +func (ts TestSuites) WithState(states ...TestSuiteState) TestSuites { + out := TestSuites{} + for _, suite := range ts { + if suite.State.Is(states...) 
{ + out = append(out, suite) + } + } + + return out +} + +func (ts TestSuites) WithoutState(states ...TestSuiteState) TestSuites { + out := TestSuites{} + for _, suite := range ts { + if !suite.State.Is(states...) { + out = append(out, suite) + } + } + + return out +} + +func (ts TestSuites) ShuffledCopy(seed int64) TestSuites { + out := make(TestSuites, len(ts)) + permutation := rand.New(rand.NewSource(seed)).Perm(len(ts)) + for i, j := range permutation { + out[i] = ts[j] + } + return out +} + +func FindSuites(args []string, cliConfig types.CLIConfig, allowPrecompiled bool) TestSuites { + suites := TestSuites{} + + if len(args) > 0 { + for _, arg := range args { + if allowPrecompiled { + suite, err := precompiledTestSuite(arg) + if err == nil { + suites = append(suites, suite) + continue + } + } + recurseForSuite := cliConfig.Recurse + if strings.HasSuffix(arg, "/...") && arg != "/..." { + arg = arg[:len(arg)-4] + recurseForSuite = true + } + suites = append(suites, suitesInDir(arg, recurseForSuite)...) + } + } else { + suites = suitesInDir(".", cliConfig.Recurse) + } + + if cliConfig.SkipPackage != "" { + skipFilters := strings.Split(cliConfig.SkipPackage, ",") + for idx := range suites { + for _, skipFilter := range skipFilters { + if strings.Contains(suites[idx].Path, skipFilter) { + suites[idx].State = TestSuiteStateSkippedByFilter + break + } + } + } + } + + return suites +} + +func precompiledTestSuite(path string) (TestSuite, error) { + info, err := os.Stat(path) + if err != nil { + return TestSuite{}, err + } + + if info.IsDir() { + return TestSuite{}, errors.New("this is a directory, not a file") + } + + if filepath.Ext(path) != ".test" && filepath.Ext(path) != ".exe" { + return TestSuite{}, errors.New("this is not a .test binary") + } + + if filepath.Ext(path) == ".test" && info.Mode()&0111 == 0 { + return TestSuite{}, errors.New("this is not executable") + } + + dir := relPath(filepath.Dir(path)) + packageName := strings.TrimSuffix(filepath.Base(path), ".exe") + packageName = strings.TrimSuffix(packageName, ".test") + + path, err = filepath.Abs(path) + if err != nil { + return TestSuite{}, err + } + + return TestSuite{ + Path: dir, + PackageName: packageName, + IsGinkgo: true, + Precompiled: true, + PathToCompiledTest: path, + State: TestSuiteStateCompiled, + }, nil +} + +func suitesInDir(dir string, recurse bool) TestSuites { + suites := TestSuites{} + + if path.Base(dir) == "vendor" { + return suites + } + + files, _ := os.ReadDir(dir) + re := regexp.MustCompile(`^[^._].*_test\.go$`) + for _, file := range files { + if !file.IsDir() && re.Match([]byte(file.Name())) { + suite := TestSuite{ + Path: relPath(dir), + PackageName: packageNameForSuite(dir), + IsGinkgo: filesHaveGinkgoSuite(dir, files), + State: TestSuiteStateUncompiled, + } + suites = append(suites, suite) + break + } + } + + if recurse { + re = regexp.MustCompile(`^[._]`) + for _, file := range files { + if file.IsDir() && !re.Match([]byte(file.Name())) { + suites = append(suites, suitesInDir(dir+"/"+file.Name(), recurse)...) + } + } + } + + return suites +} + +func relPath(dir string) string { + dir, _ = filepath.Abs(dir) + cwd, _ := os.Getwd() + dir, _ = filepath.Rel(cwd, filepath.Clean(dir)) + + if string(dir[0]) != "." { + dir = "." 
+ string(filepath.Separator) + dir + } + + return dir +} + +func packageNameForSuite(dir string) string { + path, _ := filepath.Abs(dir) + return filepath.Base(path) +} + +func filesHaveGinkgoSuite(dir string, files []os.DirEntry) bool { + reTestFile := regexp.MustCompile(`_test\.go$`) + reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"|\/ginkgo\/v2"|\/ginkgo\/v2/dsl/`) + + for _, file := range files { + if !file.IsDir() && reTestFile.Match([]byte(file.Name())) { + contents, _ := os.ReadFile(dir + "/" + file.Name()) + if reGinkgo.Match(contents) { + return true + } + } + } + + return false +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go new file mode 100644 index 00000000..bd9ca7d5 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go @@ -0,0 +1,86 @@ +package internal + +import ( + "fmt" + "io" + "os" + "os/exec" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" +) + +func FileExists(path string) bool { + _, err := os.Stat(path) + return err == nil +} + +func CopyFile(src string, dest string) error { + srcFile, err := os.Open(src) + if err != nil { + return err + } + + srcStat, err := srcFile.Stat() + if err != nil { + return err + } + + if _, err := os.Stat(dest); err == nil { + os.Remove(dest) + } + + destFile, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE, srcStat.Mode()) + if err != nil { + return err + } + + _, err = io.Copy(destFile, srcFile) + if err != nil { + return err + } + + if err := srcFile.Close(); err != nil { + return err + } + return destFile.Close() +} + +func GoFmt(path string) { + out, err := exec.Command("go", "fmt", path).CombinedOutput() + if err != nil { + command.AbortIfError(fmt.Sprintf("Could not fmt:\n%s\n", string(out)), err) + } +} + +func PluralizedWord(singular, plural string, count int) string { + if count == 1 { + return singular + } + return plural +} + +func FailedSuitesReport(suites TestSuites, f formatter.Formatter) string { + out := "" + out += "There were failures detected in the following suites:\n" + + maxPackageNameLength := 0 + for _, suite := range suites.WithState(TestSuiteStateFailureStates...) 
{ + if len(suite.PackageName) > maxPackageNameLength { + maxPackageNameLength = len(suite.PackageName) + } + } + + packageNameFormatter := fmt.Sprintf("%%%ds", maxPackageNameLength) + for _, suite := range suites { + switch suite.State { + case TestSuiteStateFailed: + out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s{{/}}\n", suite.PackageName, suite.Path) + case TestSuiteStateFailedToCompile: + out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{magenta}}[Compilation failure]{{/}}\n", suite.PackageName, suite.Path) + case TestSuiteStateFailedDueToTimeout: + out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{orange}}[%s]{{/}}\n", suite.PackageName, suite.Path, TIMEOUT_ELAPSED_FAILURE_REASON) + } + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go new file mode 100644 index 00000000..9da1bab3 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go @@ -0,0 +1,54 @@ +package internal + +import ( + "fmt" + "os/exec" + "regexp" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +var versiorRe = regexp.MustCompile(`v(\d+\.\d+\.\d+)`) + +func VerifyCLIAndFrameworkVersion(suites TestSuites) { + cliVersion := types.VERSION + mismatches := map[string][]string{} + + for _, suite := range suites { + cmd := exec.Command("go", "list", "-m", "github.com/onsi/ginkgo/v2") + cmd.Dir = suite.Path + output, err := cmd.CombinedOutput() + if err != nil { + continue + } + components := strings.Split(string(output), " ") + if len(components) != 2 { + continue + } + matches := versiorRe.FindStringSubmatch(components[1]) + if matches == nil || len(matches) != 2 { + continue + } + libraryVersion := matches[1] + if cliVersion != libraryVersion { + mismatches[libraryVersion] = append(mismatches[libraryVersion], suite.PackageName) + } + } + + if len(mismatches) == 0 { + return + } + + fmt.Println(formatter.F("{{red}}{{bold}}Ginkgo detected a version mismatch between the Ginkgo CLI and the version of Ginkgo imported by your packages:{{/}}")) + + fmt.Println(formatter.Fi(1, "Ginkgo CLI Version:")) + fmt.Println(formatter.Fi(2, "{{bold}}%s{{/}}", cliVersion)) + fmt.Println(formatter.Fi(1, "Mismatched package versions found:")) + for version, packages := range mismatches { + fmt.Println(formatter.Fi(2, "{{bold}}%s{{/}} used by %s", version, strings.Join(packages, ", "))) + } + fmt.Println("") + fmt.Println(formatter.Fiw(1, formatter.COLS, "{{gray}}Ginkgo will continue to attempt to run but you may see errors (including flag parsing errors) and should either update your go.mod or your version of the Ginkgo CLI to match.\n\nTo install the matching version of the CLI run\n {{bold}}go install github.com/onsi/ginkgo/v2/ginkgo{{/}}{{gray}}\nfrom a path that contains a go.mod file. 
Alternatively you can use\n {{bold}}go run github.com/onsi/ginkgo/v2/ginkgo{{/}}{{gray}}\nfrom a path that contains a go.mod file to invoke the matching version of the Ginkgo CLI.\n\nIf you are attempting to test multiple packages that each have a different version of the Ginkgo library with a single Ginkgo CLI that is currently unsupported.\n{{/}}")) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go new file mode 100644 index 00000000..6c61f09d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go @@ -0,0 +1,123 @@ +package labels + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "sort" + "strconv" + "strings" + + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" + "golang.org/x/tools/go/ast/inspector" +) + +func BuildLabelsCommand() command.Command { + var cliConfig = types.NewDefaultCLIConfig() + + flags, err := types.BuildLabelsCommandFlagSet(&cliConfig) + if err != nil { + panic(err) + } + + return command.Command{ + Name: "labels", + Usage: "ginkgo labels <FLAGS> <PACKAGES>", + Flags: flags, + ShortDoc: "List labels detected in the passed-in packages (or the package in the current directory if left blank).", + DocLink: "spec-labels", + Command: func(args []string, _ []string) { + ListLabels(args, cliConfig) + }, + } +} + +func ListLabels(args []string, cliConfig types.CLIConfig) { + suites := internal.FindSuites(args, cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + for _, suite := range suites { + labels := fetchLabelsFromPackage(suite.Path) + if len(labels) == 0 { + fmt.Printf("%s: No labels found\n", suite.PackageName) + } else { + fmt.Printf("%s: [%s]\n", suite.PackageName, strings.Join(labels, ", ")) + } + } +} + +func fetchLabelsFromPackage(packagePath string) []string { + fset := token.NewFileSet() + parsedPackages, err := parser.ParseDir(fset, packagePath, nil, 0) + command.AbortIfError("Failed to parse package source:", err) + + files := []*ast.File{} + hasTestPackage := false + for key, pkg := range parsedPackages { + if strings.HasSuffix(key, "_test") { + hasTestPackage = true + for _, file := range pkg.Files { + files = append(files, file) + } + } + } + if !hasTestPackage { + for _, pkg := range parsedPackages { + for _, file := range pkg.Files { + files = append(files, file) + } + } + } + + seen := map[string]bool{} + labels := []string{} + ispr := inspector.New(files) + ispr.Preorder([]ast.Node{&ast.CallExpr{}}, func(n ast.Node) { + potentialLabels := fetchLabels(n.(*ast.CallExpr)) + for _, label := range potentialLabels { + if !seen[label] { + seen[label] = true + labels = append(labels, strconv.Quote(label)) + } + } + }) + + sort.Strings(labels) + return labels +} + +func fetchLabels(callExpr *ast.CallExpr) []string { + out := []string{} + switch expr := callExpr.Fun.(type) { + case *ast.Ident: + if expr.Name != "Label" { + return out + } + case *ast.SelectorExpr: + if expr.Sel.Name != "Label" { + return out + } + default: + return out + } + for _, arg := range callExpr.Args { + switch expr := arg.(type) { + case *ast.BasicLit: + if expr.Kind == token.STRING { + unquoted, err := strconv.Unquote(expr.Value) + if err != nil { + unquoted = expr.Value + } + validated, err := types.ValidateAndCleanupLabel(unquoted, types.CodeLocation{}) + if err == nil { + out = append(out, validated) +
} + } + } + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go new file mode 100644 index 00000000..e9abb27d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go @@ -0,0 +1,58 @@ +package main + +import ( + "fmt" + "os" + + "github.com/onsi/ginkgo/v2/ginkgo/build" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/generators" + "github.com/onsi/ginkgo/v2/ginkgo/labels" + "github.com/onsi/ginkgo/v2/ginkgo/outline" + "github.com/onsi/ginkgo/v2/ginkgo/run" + "github.com/onsi/ginkgo/v2/ginkgo/unfocus" + "github.com/onsi/ginkgo/v2/ginkgo/watch" + "github.com/onsi/ginkgo/v2/types" +) + +var program command.Program + +func GenerateCommands() []command.Command { + return []command.Command{ + watch.BuildWatchCommand(), + build.BuildBuildCommand(), + generators.BuildBootstrapCommand(), + generators.BuildGenerateCommand(), + labels.BuildLabelsCommand(), + outline.BuildOutlineCommand(), + unfocus.BuildUnfocusCommand(), + BuildVersionCommand(), + } +} + +func main() { + program = command.Program{ + Name: "ginkgo", + Heading: fmt.Sprintf("Ginkgo Version %s", types.VERSION), + Commands: GenerateCommands(), + DefaultCommand: run.BuildRunCommand(), + DeprecatedCommands: []command.DeprecatedCommand{ + {Name: "convert", Deprecation: types.Deprecations.Convert()}, + {Name: "blur", Deprecation: types.Deprecations.Blur()}, + {Name: "nodot", Deprecation: types.Deprecations.Nodot()}, + }, + } + + program.RunAndExit(os.Args) +} + +func BuildVersionCommand() command.Command { + return command.Command{ + Name: "version", + Usage: "ginkgo version", + ShortDoc: "Print Ginkgo's version", + Command: func(_ []string, _ []string) { + fmt.Printf("Ginkgo Version %s\n", types.VERSION) + }, + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go new file mode 100644 index 00000000..0b9b19fe --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go @@ -0,0 +1,302 @@ +package outline + +import ( + "github.com/onsi/ginkgo/v2/types" + "go/ast" + "go/token" + "strconv" +) + +const ( + // undefinedTextAlt is used if the spec/container text cannot be derived + undefinedTextAlt = "undefined" +) + +// ginkgoMetadata holds useful bits of information for every entry in the outline +type ginkgoMetadata struct { + // Name is the spec or container function name, e.g. 
`Describe` or `It` + Name string `json:"name"` + + // Text is the `text` argument passed to specs, and some containers + Text string `json:"text"` + + // Start is the position of first character of the spec or container block + Start int `json:"start"` + + // End is the position of first character immediately after the spec or container block + End int `json:"end"` + + Spec bool `json:"spec"` + Focused bool `json:"focused"` + Pending bool `json:"pending"` + Labels []string `json:"labels"` +} + +// ginkgoNode is used to construct the outline as a tree +type ginkgoNode struct { + ginkgoMetadata + Nodes []*ginkgoNode `json:"nodes"` +} + +type walkFunc func(n *ginkgoNode) + +func (n *ginkgoNode) PreOrder(f walkFunc) { + f(n) + for _, m := range n.Nodes { + m.PreOrder(f) + } +} + +func (n *ginkgoNode) PostOrder(f walkFunc) { + for _, m := range n.Nodes { + m.PostOrder(f) + } + f(n) +} + +func (n *ginkgoNode) Walk(pre, post walkFunc) { + pre(n) + for _, m := range n.Nodes { + m.Walk(pre, post) + } + post(n) +} + +// PropagateInheritedProperties propagates the Pending and Focused properties +// through the subtree rooted at n. +func (n *ginkgoNode) PropagateInheritedProperties() { + n.PreOrder(func(thisNode *ginkgoNode) { + for _, descendantNode := range thisNode.Nodes { + if thisNode.Pending { + descendantNode.Pending = true + descendantNode.Focused = false + } + if thisNode.Focused && !descendantNode.Pending { + descendantNode.Focused = true + } + } + }) +} + +// BackpropagateUnfocus propagates the Focused property through the subtree +// rooted at n. It applies the rule described in the Ginkgo docs: +// > Nested programmatically focused specs follow a simple rule: if a +// > leaf-node is marked focused, any of its ancestor nodes that are marked +// > focus will be unfocused. +func (n *ginkgoNode) BackpropagateUnfocus() { + focusedSpecInSubtreeStack := []bool{} + n.PostOrder(func(thisNode *ginkgoNode) { + if thisNode.Spec { + focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, thisNode.Focused) + return + } + focusedSpecInSubtree := false + for range thisNode.Nodes { + focusedSpecInSubtree = focusedSpecInSubtree || focusedSpecInSubtreeStack[len(focusedSpecInSubtreeStack)-1] + focusedSpecInSubtreeStack = focusedSpecInSubtreeStack[0 : len(focusedSpecInSubtreeStack)-1] + } + focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, focusedSpecInSubtree) + if focusedSpecInSubtree { + thisNode.Focused = false + } + }) + +} + +func packageAndIdentNamesFromCallExpr(ce *ast.CallExpr) (string, string, bool) { + switch ex := ce.Fun.(type) { + case *ast.Ident: + return "", ex.Name, true + case *ast.SelectorExpr: + pkgID, ok := ex.X.(*ast.Ident) + if !ok { + return "", "", false + } + // A package identifier is top-level, so Obj must be nil + if pkgID.Obj != nil { + return "", "", false + } + if ex.Sel == nil { + return "", "", false + } + return pkgID.Name, ex.Sel.Name, true + default: + return "", "", false + } +} + +// absoluteOffsetsForNode derives the absolute character offsets of the node start and +// end positions. +func absoluteOffsetsForNode(fset *token.FileSet, n ast.Node) (start, end int) { + return fset.PositionFor(n.Pos(), false).Offset, fset.PositionFor(n.End(), false).Offset +} + +// ginkgoNodeFromCallExpr derives an outline entry from a go AST subtree +// corresponding to a Ginkgo container or spec. 
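`PropagateInheritedProperties` is a plain pre-order walk pushing container flags down to descendants. The stripped-down tree below (field names mirror `ginkgoNode`; the node names and structure are invented) demonstrates the same inheritance rule end to end:

```
package main

import "fmt"

// node keeps just the ginkgoNode fields needed to show flag inheritance.
type node struct {
	Name    string
	Focused bool
	Pending bool
	Nodes   []*node
}

func (n *node) preOrder(f func(*node)) {
	f(n)
	for _, m := range n.Nodes {
		m.preOrder(f)
	}
}

// propagate applies the vendored inheritance rule: a pending container
// makes every descendant pending (and unfocused), while a focused
// container focuses its non-pending descendants.
func propagate(root *node) {
	root.preOrder(func(n *node) {
		for _, child := range n.Nodes {
			if n.Pending {
				child.Pending = true
				child.Focused = false
			}
			if n.Focused && !child.Pending {
				child.Focused = true
			}
		}
	})
}

func main() {
	root := &node{Name: "FDescribe", Focused: true, Nodes: []*node{
		{Name: "It A"},
		{Name: "PIt B", Pending: true},
	}}
	propagate(root)
	for _, c := range root.Nodes {
		// Prints: "It A" focused, "PIt B" pending and unfocused.
		fmt.Printf("%-6s focused=%v pending=%v\n", c.Name, c.Focused, c.Pending)
	}
}
```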
+func ginkgoNodeFromCallExpr(fset *token.FileSet, ce *ast.CallExpr, ginkgoPackageName *string) (*ginkgoNode, bool) { + packageName, identName, ok := packageAndIdentNamesFromCallExpr(ce) + if !ok { + return nil, false + } + + n := ginkgoNode{} + n.Name = identName + n.Start, n.End = absoluteOffsetsForNode(fset, ce) + n.Nodes = make([]*ginkgoNode, 0) + switch identName { + case "It", "Specify", "Entry": + n.Spec = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + n.Pending = pendingFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "FIt", "FSpecify", "FEntry": + n.Spec = true + n.Focused = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "PIt", "PSpecify", "XIt", "XSpecify", "PEntry", "XEntry": + n.Spec = true + n.Pending = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "Context", "Describe", "When", "DescribeTable": + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + n.Pending = pendingFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "FContext", "FDescribe", "FWhen", "FDescribeTable": + n.Focused = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "PContext", "PDescribe", "PWhen", "XContext", "XDescribe", "XWhen", "PDescribeTable", "XDescribeTable": + n.Pending = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "By": + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "AfterEach", "BeforeEach": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "JustAfterEach", "JustBeforeEach": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "AfterSuite", "BeforeSuite": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "SynchronizedAfterSuite", "SynchronizedBeforeSuite": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + default: + return nil, false + } +} + +// textOrAltFromCallExpr tries to derive the "text" of a Ginkgo spec or +// container. If it cannot derive it, it returns the alt text. +func textOrAltFromCallExpr(ce *ast.CallExpr, alt string) string { + text, defined := textFromCallExpr(ce) + if !defined { + return alt + } + return text +} + +// textFromCallExpr tries to derive the "text" of a Ginkgo spec or container. If +// it cannot derive it, it returns false. 
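+// For example (illustrative only): It("removes focus", func() {...}) yields
+// ("removes focus", true), while It(someVariable, func() {...}) yields
+// ("", false) because the first argument is not a basic literal.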
+func textFromCallExpr(ce *ast.CallExpr) (string, bool) {
+	if len(ce.Args) < 1 {
+		return "", false
+	}
+	text, ok := ce.Args[0].(*ast.BasicLit)
+	if !ok {
+		return "", false
+	}
+	switch text.Kind {
+	case token.CHAR, token.STRING:
+		// For token.CHAR and token.STRING, Value is quoted
+		unquoted, err := strconv.Unquote(text.Value)
+		if err != nil {
+			// If unquoting fails, just use the raw Value
+			return text.Value, true
+		}
+		return unquoted, true
+	default:
+		return text.Value, true
+	}
+}
+
+func labelFromCallExpr(ce *ast.CallExpr) []string {
+
+	labels := []string{}
+	if len(ce.Args) < 2 {
+		return labels
+	}
+
+	for _, arg := range ce.Args[1:] {
+		switch expr := arg.(type) {
+		case *ast.CallExpr:
+			id, ok := expr.Fun.(*ast.Ident)
+			if !ok {
+				// skip over cases where expr.Fun is actually an *ast.SelectorExpr
+				continue
+			}
+			if id.Name == "Label" {
+				ls := extractLabels(expr)
+				for _, label := range ls {
+					labels = append(labels, label)
+				}
+			}
+		}
+	}
+	return labels
+}
+
+func extractLabels(expr *ast.CallExpr) []string {
+	out := []string{}
+	for _, arg := range expr.Args {
+		switch expr := arg.(type) {
+		case *ast.BasicLit:
+			if expr.Kind == token.STRING {
+				unquoted, err := strconv.Unquote(expr.Value)
+				if err != nil {
+					unquoted = expr.Value
+				}
+				validated, err := types.ValidateAndCleanupLabel(unquoted, types.CodeLocation{})
+				if err == nil {
+					out = append(out, validated)
+				}
+			}
+		}
+	}
+
+	return out
+}
+
+func pendingFromCallExpr(ce *ast.CallExpr) bool {
+
+	pending := false
+	if len(ce.Args) < 2 {
+		return pending
+	}
+
+	for _, arg := range ce.Args[1:] {
+		switch expr := arg.(type) {
+		case *ast.CallExpr:
+			id, ok := expr.Fun.(*ast.Ident)
+			if !ok {
+				// skip over cases where expr.Fun is actually an *ast.SelectorExpr
+				continue
+			}
+			if id.Name == "Pending" {
+				pending = true
+			}
+		case *ast.Ident:
+			if expr.Name == "Pending" {
+				pending = true
+			}
+		}
+	}
+	return pending
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go
new file mode 100644
index 00000000..67ec5ab7
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go
@@ -0,0 +1,65 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Most of the required functions were available in the
+// "golang.org/x/tools/go/ast/astutil" package, but not exported.
+// They were copied from https://github.com/golang/tools/blob/2b0845dc783e36ae26d683f4915a5840ef01ab0f/go/ast/astutil/imports.go
+
+package outline
+
+import (
+	"go/ast"
+	"strconv"
+	"strings"
+)
+
+// packageNameForImport returns the package name for the imported package with
+// the given path. If the package is not imported, it returns nil. "Package
+// name" refers to `pkgname` in the call expression
+// `pkgname.ExportedIdentifier`. Examples:
+// (import path not found) -> nil
+// "import example.com/pkg/foo" -> "foo"
+// "import fooalias example.com/pkg/foo" -> "fooalias"
+// "import . example.com/pkg/foo" -> ""
+func packageNameForImport(f *ast.File, path string) *string {
+	spec := importSpec(f, path)
+	if spec == nil {
+		return nil
+	}
+	name := spec.Name.String()
+	if name == "<nil>" {
+		// If the package name is not explicitly specified,
+		// make an educated guess. This is not guaranteed to be correct.
+		lastSlash := strings.LastIndex(path, "/")
+		if lastSlash == -1 {
+			name = path
+		} else {
+			name = path[lastSlash+1:]
+		}
+	}
+	if name == "." 
{ + name = "" + } + return &name +} + +// importSpec returns the import spec if f imports path, +// or nil otherwise. +func importSpec(f *ast.File, path string) *ast.ImportSpec { + for _, s := range f.Imports { + if strings.HasPrefix(importPath(s), path) { + return s + } + } + return nil +} + +// importPath returns the unquoted import path of s, +// or "" if the path is not properly quoted. +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err != nil { + return "" + } + return t +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go new file mode 100644 index 00000000..c2327cda --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go @@ -0,0 +1,110 @@ +package outline + +import ( + "encoding/json" + "fmt" + "go/ast" + "go/token" + "strings" + + "golang.org/x/tools/go/ast/inspector" +) + +const ( + // ginkgoImportPath is the well-known ginkgo import path + ginkgoImportPath = "github.com/onsi/ginkgo/v2" +) + +// FromASTFile returns an outline for a Ginkgo test source file +func FromASTFile(fset *token.FileSet, src *ast.File) (*outline, error) { + ginkgoPackageName := packageNameForImport(src, ginkgoImportPath) + if ginkgoPackageName == nil { + return nil, fmt.Errorf("file does not import %q", ginkgoImportPath) + } + + root := ginkgoNode{} + stack := []*ginkgoNode{&root} + ispr := inspector.New([]*ast.File{src}) + ispr.Nodes([]ast.Node{(*ast.CallExpr)(nil)}, func(node ast.Node, push bool) bool { + if push { + // Pre-order traversal + ce, ok := node.(*ast.CallExpr) + if !ok { + // Because `Nodes` calls this function only when the node is an + // ast.CallExpr, this should never happen + panic(fmt.Errorf("node starting at %d, ending at %d is not an *ast.CallExpr", node.Pos(), node.End())) + } + gn, ok := ginkgoNodeFromCallExpr(fset, ce, ginkgoPackageName) + if !ok { + // Node is not a Ginkgo spec or container, continue + return true + } + parent := stack[len(stack)-1] + parent.Nodes = append(parent.Nodes, gn) + stack = append(stack, gn) + return true + } + // Post-order traversal + start, end := absoluteOffsetsForNode(fset, node) + lastVisitedGinkgoNode := stack[len(stack)-1] + if start != lastVisitedGinkgoNode.Start || end != lastVisitedGinkgoNode.End { + // Node is not a Ginkgo spec or container, so it was not pushed onto the stack, continue + return true + } + stack = stack[0 : len(stack)-1] + return true + }) + if len(root.Nodes) == 0 { + return &outline{[]*ginkgoNode{}}, nil + } + + // Derive the final focused property for all nodes. This must be done + // _before_ propagating the inherited focused property. + root.BackpropagateUnfocus() + // Now, propagate inherited properties, including focused and pending. + root.PropagateInheritedProperties() + + return &outline{root.Nodes}, nil +} + +type outline struct { + Nodes []*ginkgoNode `json:"nodes"` +} + +func (o *outline) MarshalJSON() ([]byte, error) { + return json.Marshal(o.Nodes) +} + +// String returns a CSV-formatted outline. Spec or container are output in +// depth-first order. +func (o *outline) String() string { + return o.StringIndent(0) +} + +// StringIndent returns a CSV-formated outline, but every line is indented by +// one 'width' of spaces for every level of nesting. 
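+// For instance (hypothetical specs), StringIndent(4) could emit:
+//
+//	Name,Text,Start,End,Spec,Focused,Pending,Labels
+//	Describe,Books,120,480,false,false,false,""
+//	    It,can be checked out,150,250,true,false,false,"library, slow"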
+func (o *outline) StringIndent(width int) string {
+	var b strings.Builder
+	b.WriteString("Name,Text,Start,End,Spec,Focused,Pending,Labels\n")
+
+	currentIndent := 0
+	pre := func(n *ginkgoNode) {
+		b.WriteString(fmt.Sprintf("%*s", currentIndent, ""))
+		var labels string
+		if len(n.Labels) == 1 {
+			labels = n.Labels[0]
+		} else {
+			labels = strings.Join(n.Labels, ", ")
+		}
+		// enclose the labels in a double-quoted, comma-separated list so that,
+		// when imported into a CSV app, the Labels column holds comma-separated strings
+		b.WriteString(fmt.Sprintf("%s,%s,%d,%d,%t,%t,%t,\"%s\"\n", n.Name, n.Text, n.Start, n.End, n.Spec, n.Focused, n.Pending, labels))
+		currentIndent += width
+	}
+	post := func(n *ginkgoNode) {
+		currentIndent -= width
+	}
+	for _, n := range o.Nodes {
+		n.Walk(pre, post)
+	}
+	return b.String()
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go
new file mode 100644
index 00000000..36698d46
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go
@@ -0,0 +1,98 @@
+package outline
+
+import (
+	"encoding/json"
+	"fmt"
+	"go/parser"
+	"go/token"
+	"os"
+
+	"github.com/onsi/ginkgo/v2/ginkgo/command"
+	"github.com/onsi/ginkgo/v2/types"
+)
+
+const (
+	// indentWidth is the width used by the 'indent' output
+	indentWidth = 4
+	// stdinAlias is a portable alias for stdin. This convention is used in
+	// other CLIs, e.g., kubectl.
+	stdinAlias   = "-"
+	usageCommand = "ginkgo outline <filename>"
+)
+
+type outlineConfig struct {
+	Format string
+}
+
+func BuildOutlineCommand() command.Command {
+	conf := outlineConfig{
+		Format: "csv",
+	}
+	flags, err := types.NewGinkgoFlagSet(
+		types.GinkgoFlags{
+			{Name: "format", KeyPath: "Format",
+				Usage:             "Format of outline",
+				UsageArgument:     "one of 'csv', 'indent', or 'json'",
+				UsageDefaultValue: conf.Format,
+			},
+		},
+		&conf,
+		types.GinkgoFlagSections{},
+	)
+	if err != nil {
+		panic(err)
+	}
+
+	return command.Command{
+		Name:          "outline",
+		Usage:         "ginkgo outline <filename>",
+		ShortDoc:      "Create an outline of Ginkgo symbols for a file",
+		Documentation: "To read from stdin, use: `ginkgo outline -`",
+		DocLink:       "creating-an-outline-of-specs",
+		Flags:         flags,
+		Command: func(args []string, _ []string) {
+			outlineFile(args, conf.Format)
+		},
+	}
+}
+
+func outlineFile(args []string, format string) {
+	if len(args) != 1 {
+		command.AbortWithUsage("outline expects exactly one argument")
+	}
+
+	filename := args[0]
+	var src *os.File
+	if filename == stdinAlias {
+		src = os.Stdin
+	} else {
+		var err error
+		src, err = os.Open(filename)
+		command.AbortIfError("Failed to open file:", err)
+	}
+
+	fset := token.NewFileSet()
+
+	parsedSrc, err := parser.ParseFile(fset, filename, src, 0)
+	command.AbortIfError("Failed to parse source:", err)
+
+	o, err := FromASTFile(fset, parsedSrc)
+	command.AbortIfError("Failed to create outline:", err)
+
+	var oerr error
+	switch format {
+	case "csv":
+		_, oerr = fmt.Print(o)
+	case "indent":
+		_, oerr = fmt.Print(o.StringIndent(indentWidth))
+	case "json":
+		b, err := json.Marshal(o)
+		if err != nil {
+			println(fmt.Sprintf("error marshalling to json: %s", err))
+		}
+		_, oerr = fmt.Println(string(b))
+	default:
+		command.AbortWith("Format %s not accepted", format)
+	}
+	command.AbortIfError("Failed to write outline:", oerr)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go
new file mode 100644
index 00000000..aaed4d57
--- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go @@ -0,0 +1,232 @@ +package run + +import ( + "fmt" + "os" + "strings" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/internal/interrupt_handler" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildRunCommand() command.Command { + var suiteConfig = types.NewDefaultSuiteConfig() + var reporterConfig = types.NewDefaultReporterConfig() + var cliConfig = types.NewDefaultCLIConfig() + var goFlagsConfig = types.NewDefaultGoFlagsConfig() + + flags, err := types.BuildRunCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig) + if err != nil { + panic(err) + } + + interruptHandler := interrupt_handler.NewInterruptHandler(nil) + interrupt_handler.SwallowSigQuit() + + return command.Command{ + Name: "run", + Flags: flags, + Usage: "ginkgo run -- ", + ShortDoc: "Run the tests in the passed in (or the package in the current directory if left blank)", + Documentation: "Any arguments after -- will be passed to the test.", + DocLink: "running-tests", + Command: func(args []string, additionalArgs []string) { + var errors []error + cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig) + command.AbortIfErrors("Ginkgo detected configuration issues:", errors) + + runner := &SpecRunner{ + cliConfig: cliConfig, + goFlagsConfig: goFlagsConfig, + suiteConfig: suiteConfig, + reporterConfig: reporterConfig, + flags: flags, + + interruptHandler: interruptHandler, + } + + runner.RunSpecs(args, additionalArgs) + }, + } +} + +type SpecRunner struct { + suiteConfig types.SuiteConfig + reporterConfig types.ReporterConfig + cliConfig types.CLIConfig + goFlagsConfig types.GoFlagsConfig + flags types.GinkgoFlagSet + + interruptHandler *interrupt_handler.InterruptHandler +} + +func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) { + suites := internal.FindSuites(args, r.cliConfig, true) + skippedSuites := suites.WithState(internal.TestSuiteStateSkippedByFilter) + suites = suites.WithoutState(internal.TestSuiteStateSkippedByFilter) + + internal.VerifyCLIAndFrameworkVersion(suites) + + if len(skippedSuites) > 0 { + fmt.Println("Will skip:") + for _, skippedSuite := range skippedSuites { + fmt.Println(" " + skippedSuite.Path) + } + } + + if len(skippedSuites) > 0 && len(suites) == 0 { + command.AbortGracefullyWith("All tests skipped! 
Exiting...") + } + + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + + if len(suites) > 1 && !r.flags.WasSet("succinct") && r.reporterConfig.Verbosity().LT(types.VerbosityLevelVerbose) { + r.reporterConfig.Succinct = true + } + + t := time.Now() + var endTime time.Time + if r.suiteConfig.Timeout > 0 { + endTime = t.Add(r.suiteConfig.Timeout) + } + + iteration := 0 +OUTER_LOOP: + for { + if !r.flags.WasSet("seed") { + r.suiteConfig.RandomSeed = time.Now().Unix() + } + if r.cliConfig.RandomizeSuites && len(suites) > 1 { + suites = suites.ShuffledCopy(r.suiteConfig.RandomSeed) + } + + opc := internal.NewOrderedParallelCompiler(r.cliConfig.ComputedNumCompilers()) + opc.StartCompiling(suites, r.goFlagsConfig) + + SUITE_LOOP: + for { + suiteIdx, suite := opc.Next() + if suiteIdx >= len(suites) { + break SUITE_LOOP + } + suites[suiteIdx] = suite + + if r.interruptHandler.Status().Interrupted() { + opc.StopAndDrain() + break OUTER_LOOP + } + + if suites[suiteIdx].State.Is(internal.TestSuiteStateSkippedDueToEmptyCompilation) { + fmt.Printf("Skipping %s (no test files)\n", suite.Path) + continue SUITE_LOOP + } + + if suites[suiteIdx].State.Is(internal.TestSuiteStateFailedToCompile) { + fmt.Println(suites[suiteIdx].CompilationError.Error()) + if !r.cliConfig.KeepGoing { + opc.StopAndDrain() + } + continue SUITE_LOOP + } + + if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 && !r.cliConfig.KeepGoing { + suites[suiteIdx].State = internal.TestSuiteStateSkippedDueToPriorFailures + opc.StopAndDrain() + continue SUITE_LOOP + } + + if !endTime.IsZero() { + r.suiteConfig.Timeout = endTime.Sub(time.Now()) + if r.suiteConfig.Timeout <= 0 { + suites[suiteIdx].State = internal.TestSuiteStateFailedDueToTimeout + opc.StopAndDrain() + continue SUITE_LOOP + } + } + + suites[suiteIdx] = internal.RunCompiledSuite(suites[suiteIdx], r.suiteConfig, r.reporterConfig, r.cliConfig, r.goFlagsConfig, additionalArgs) + } + + if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 { + if iteration > 0 { + fmt.Printf("\nTests failed on attempt #%d\n\n", iteration+1) + } + break OUTER_LOOP + } + + if r.cliConfig.UntilItFails { + fmt.Printf("\nAll tests passed...\nWill keep running them until they fail.\nThis was attempt #%d\n%s\n", iteration+1, orcMessage(iteration+1)) + } else if r.cliConfig.Repeat > 0 && iteration < r.cliConfig.Repeat { + fmt.Printf("\nAll tests passed...\nThis was attempt %d of %d.\n", iteration+1, r.cliConfig.Repeat+1) + } else { + break OUTER_LOOP + } + iteration += 1 + } + + internal.Cleanup(r.goFlagsConfig, suites...) + + messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, r.cliConfig, r.suiteConfig, r.reporterConfig, r.goFlagsConfig) + command.AbortIfError("could not finalize profiles:", err) + for _, message := range messages { + fmt.Println(message) + } + + fmt.Printf("\nGinkgo ran %d %s in %s\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), time.Since(t)) + + if suites.CountWithState(internal.TestSuiteStateFailureStates...) 
== 0 { + if suites.AnyHaveProgrammaticFocus() && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" { + fmt.Printf("Test Suite Passed\n") + fmt.Printf("Detected Programmatic Focus - setting exit status to %d\n", types.GINKGO_FOCUS_EXIT_CODE) + command.Abort(command.AbortDetails{ExitCode: types.GINKGO_FOCUS_EXIT_CODE}) + } else { + fmt.Printf("Test Suite Passed\n") + command.Abort(command.AbortDetails{}) + } + } else { + fmt.Fprintln(formatter.ColorableStdOut, "") + if len(suites) > 1 && suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 { + fmt.Fprintln(formatter.ColorableStdOut, + internal.FailedSuitesReport(suites, formatter.NewWithNoColorBool(r.reporterConfig.NoColor))) + } + fmt.Printf("Test Suite Failed\n") + command.Abort(command.AbortDetails{ExitCode: 1}) + } +} + +func orcMessage(iteration int) string { + if iteration < 10 { + return "" + } else if iteration < 30 { + return []string{ + "If at first you succeed...", + "...try, try again.", + "Looking good!", + "Still good...", + "I think your tests are fine....", + "Yep, still passing", + "Oh boy, here I go testin' again!", + "Even the gophers are getting bored", + "Did you try -race?", + "Maybe you should stop now?", + "I'm getting tired...", + "What if I just made you a sandwich?", + "Hit ^C, hit ^C, please hit ^C", + "Make it stop. Please!", + "Come on! Enough is enough!", + "Dave, this conversation can serve no purpose anymore. Goodbye.", + "Just what do you think you're doing, Dave? ", + "I, Sisyphus", + "Insanity: doing the same thing over and over again and expecting different results. -Einstein", + "I guess Einstein never tried to churn butter", + }[iteration-10] + "\n" + } else { + return "No, seriously... you can probably stop now.\n" + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go new file mode 100644 index 00000000..7dd29439 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go @@ -0,0 +1,186 @@ +package unfocus + +import ( + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/token" + "io" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/onsi/ginkgo/v2/ginkgo/command" +) + +func BuildUnfocusCommand() command.Command { + return command.Command{ + Name: "unfocus", + Usage: "ginkgo unfocus", + ShortDoc: "Recursively unfocus any focused tests under the current directory", + DocLink: "filtering-specs", + Command: func(_ []string, _ []string) { + unfocusSpecs() + }, + } +} + +func unfocusSpecs() { + fmt.Println("Scanning for focus...") + + goFiles := make(chan string) + go func() { + unfocusDir(goFiles, ".") + close(goFiles) + }() + + const workers = 10 + wg := sync.WaitGroup{} + wg.Add(workers) + + for i := 0; i < workers; i++ { + go func() { + for path := range goFiles { + unfocusFile(path) + } + wg.Done() + }() + } + + wg.Wait() +} + +func unfocusDir(goFiles chan string, path string) { + files, err := os.ReadDir(path) + if err != nil { + fmt.Println(err.Error()) + return + } + + for _, f := range files { + switch { + case f.IsDir() && shouldProcessDir(f.Name()): + unfocusDir(goFiles, filepath.Join(path, f.Name())) + case !f.IsDir() && shouldProcessFile(f.Name()): + goFiles <- filepath.Join(path, f.Name()) + } + } +} + +func shouldProcessDir(basename string) bool { + return basename != "vendor" && !strings.HasPrefix(basename, ".") +} + +func shouldProcessFile(basename string) bool { + return strings.HasSuffix(basename, ".go") +} + +func 
unfocusFile(path string) { + data, err := os.ReadFile(path) + if err != nil { + fmt.Printf("error reading file '%s': %s\n", path, err.Error()) + return + } + + ast, err := parser.ParseFile(token.NewFileSet(), path, bytes.NewReader(data), parser.ParseComments) + if err != nil { + fmt.Printf("error parsing file '%s': %s\n", path, err.Error()) + return + } + + eliminations := scanForFocus(ast) + if len(eliminations) == 0 { + return + } + + fmt.Printf("...updating %s\n", path) + backup, err := writeBackup(path, data) + if err != nil { + fmt.Printf("error creating backup file: %s\n", err.Error()) + return + } + + if err := updateFile(path, data, eliminations); err != nil { + fmt.Printf("error writing file '%s': %s\n", path, err.Error()) + return + } + + os.Remove(backup) +} + +func writeBackup(path string, data []byte) (string, error) { + t, err := os.CreateTemp(filepath.Dir(path), filepath.Base(path)) + + if err != nil { + return "", fmt.Errorf("error creating temporary file: %w", err) + } + defer t.Close() + + if _, err := io.Copy(t, bytes.NewReader(data)); err != nil { + return "", fmt.Errorf("error writing to temporary file: %w", err) + } + + return t.Name(), nil +} + +func updateFile(path string, data []byte, eliminations [][]int64) error { + to, err := os.Create(path) + if err != nil { + return fmt.Errorf("error opening file for writing '%s': %w\n", path, err) + } + defer to.Close() + + from := bytes.NewReader(data) + var cursor int64 + for _, eliminationRange := range eliminations { + positionToEliminate, lengthToEliminate := eliminationRange[0]-1, eliminationRange[1] + if _, err := io.CopyN(to, from, positionToEliminate-cursor); err != nil { + return fmt.Errorf("error copying data: %w", err) + } + + cursor = positionToEliminate + lengthToEliminate + + if _, err := from.Seek(lengthToEliminate, io.SeekCurrent); err != nil { + return fmt.Errorf("error seeking to position in buffer: %w", err) + } + } + + if _, err := io.Copy(to, from); err != nil { + return fmt.Errorf("error copying end data: %w", err) + } + + return nil +} + +func scanForFocus(file *ast.File) (eliminations [][]int64) { + ast.Inspect(file, func(n ast.Node) bool { + if c, ok := n.(*ast.CallExpr); ok { + if i, ok := c.Fun.(*ast.Ident); ok { + if isFocus(i.Name) { + eliminations = append(eliminations, []int64{int64(i.Pos()), 1}) + } + } + } + + if i, ok := n.(*ast.Ident); ok { + if i.Name == "Focus" { + eliminations = append(eliminations, []int64{int64(i.Pos()), 6}) + } + } + + return true + }) + + return eliminations +} + +func isFocus(name string) bool { + switch name { + case "FDescribe", "FContext", "FIt", "FDescribeTable", "FEntry", "FSpecify", "FWhen": + return true + default: + return false + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go new file mode 100644 index 00000000..6c485c5b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go @@ -0,0 +1,22 @@ +package watch + +import "sort" + +type Delta struct { + ModifiedPackages []string + + NewSuites []*Suite + RemovedSuites []*Suite + modifiedSuites []*Suite +} + +type DescendingByDelta []*Suite + +func (a DescendingByDelta) Len() int { return len(a) } +func (a DescendingByDelta) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a DescendingByDelta) Less(i, j int) bool { return a[i].Delta() > a[j].Delta() } + +func (d Delta) ModifiedSuites() []*Suite { + sort.Sort(DescendingByDelta(d.modifiedSuites)) + return d.modifiedSuites +} diff --git 
a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go new file mode 100644 index 00000000..26418ac6 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go @@ -0,0 +1,75 @@ +package watch + +import ( + "fmt" + + "regexp" + + "github.com/onsi/ginkgo/v2/ginkgo/internal" +) + +type SuiteErrors map[internal.TestSuite]error + +type DeltaTracker struct { + maxDepth int + watchRegExp *regexp.Regexp + suites map[string]*Suite + packageHashes *PackageHashes +} + +func NewDeltaTracker(maxDepth int, watchRegExp *regexp.Regexp) *DeltaTracker { + return &DeltaTracker{ + maxDepth: maxDepth, + watchRegExp: watchRegExp, + packageHashes: NewPackageHashes(watchRegExp), + suites: map[string]*Suite{}, + } +} + +func (d *DeltaTracker) Delta(suites internal.TestSuites) (delta Delta, errors SuiteErrors) { + errors = SuiteErrors{} + delta.ModifiedPackages = d.packageHashes.CheckForChanges() + + providedSuitePaths := map[string]bool{} + for _, suite := range suites { + providedSuitePaths[suite.Path] = true + } + + d.packageHashes.StartTrackingUsage() + + for _, suite := range d.suites { + if providedSuitePaths[suite.Suite.Path] { + if suite.Delta() > 0 { + delta.modifiedSuites = append(delta.modifiedSuites, suite) + } + } else { + delta.RemovedSuites = append(delta.RemovedSuites, suite) + } + } + + d.packageHashes.StopTrackingUsageAndPrune() + + for _, suite := range suites { + _, ok := d.suites[suite.Path] + if !ok { + s, err := NewSuite(suite, d.maxDepth, d.packageHashes) + if err != nil { + errors[suite] = err + continue + } + d.suites[suite.Path] = s + delta.NewSuites = append(delta.NewSuites, s) + } + } + + return delta, errors +} + +func (d *DeltaTracker) WillRun(suite internal.TestSuite) error { + s, ok := d.suites[suite.Path] + if !ok { + return fmt.Errorf("unknown suite %s", suite.Path) + } + + return s.MarkAsRunAndRecomputedDependencies(d.maxDepth) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go new file mode 100644 index 00000000..f5ddff30 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go @@ -0,0 +1,92 @@ +package watch + +import ( + "go/build" + "regexp" +) + +var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`) +var ginkgoIntegrationTestFilter = regexp.MustCompile(`github\.com/onsi/ginkgo/integration`) //allow us to integration test this thing + +type Dependencies struct { + deps map[string]int +} + +func NewDependencies(path string, maxDepth int) (Dependencies, error) { + d := Dependencies{ + deps: map[string]int{}, + } + + if maxDepth == 0 { + return d, nil + } + + err := d.seedWithDepsForPackageAtPath(path) + if err != nil { + return d, err + } + + for depth := 1; depth < maxDepth; depth++ { + n := len(d.deps) + d.addDepsForDepth(depth) + if n == len(d.deps) { + break + } + } + + return d, nil +} + +func (d Dependencies) Dependencies() map[string]int { + return d.deps +} + +func (d Dependencies) seedWithDepsForPackageAtPath(path string) error { + pkg, err := build.ImportDir(path, 0) + if err != nil { + return err + } + + d.resolveAndAdd(pkg.Imports, 1) + d.resolveAndAdd(pkg.TestImports, 1) + d.resolveAndAdd(pkg.XTestImports, 1) + + delete(d.deps, pkg.Dir) + return nil +} + +func (d Dependencies) addDepsForDepth(depth int) { + for dep, depDepth := range d.deps { + if depDepth == depth { + d.addDepsForDep(dep, depth+1) + } + } +} + 
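+// Illustrative sketch (not part of the upstream file): for a suite at a
+// hypothetical path "./mysuite" that imports pkg/a, which in turn imports
+// pkg/b, a depth-2 walk records each dependency directory at the depth it
+// was first reached:
+//
+//	deps, _ := NewDependencies("./mysuite", 2)
+//	fmt.Println(deps.Dependencies()) // e.g. map[/go/src/pkg/a:1 /go/src/pkg/b:2]
+//
+// addDepsForDep below performs one step of that walk for a single dependency.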
+func (d Dependencies) addDepsForDep(dep string, depth int) { + pkg, err := build.ImportDir(dep, 0) + if err != nil { + println(err.Error()) + return + } + d.resolveAndAdd(pkg.Imports, depth) +} + +func (d Dependencies) resolveAndAdd(deps []string, depth int) { + for _, dep := range deps { + pkg, err := build.Import(dep, ".", 0) + if err != nil { + continue + } + if !pkg.Goroot && (!ginkgoAndGomegaFilter.Match([]byte(pkg.Dir)) || ginkgoIntegrationTestFilter.Match([]byte(pkg.Dir))) { + d.addDepIfNotPresent(pkg.Dir, depth) + } + } +} + +func (d Dependencies) addDepIfNotPresent(dep string, depth int) { + _, ok := d.deps[dep] + if !ok { + d.deps[dep] = depth + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go new file mode 100644 index 00000000..e9f7ec0c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go @@ -0,0 +1,108 @@ +package watch + +import ( + "fmt" + "os" + "regexp" + "time" +) + +var goTestRegExp = regexp.MustCompile(`_test\.go$`) + +type PackageHash struct { + CodeModifiedTime time.Time + TestModifiedTime time.Time + Deleted bool + + path string + codeHash string + testHash string + watchRegExp *regexp.Regexp +} + +func NewPackageHash(path string, watchRegExp *regexp.Regexp) *PackageHash { + p := &PackageHash{ + path: path, + watchRegExp: watchRegExp, + } + + p.codeHash, _, p.testHash, _, p.Deleted = p.computeHashes() + + return p +} + +func (p *PackageHash) CheckForChanges() bool { + codeHash, codeModifiedTime, testHash, testModifiedTime, deleted := p.computeHashes() + + if deleted { + if !p.Deleted { + t := time.Now() + p.CodeModifiedTime = t + p.TestModifiedTime = t + } + p.Deleted = true + return true + } + + modified := false + p.Deleted = false + + if p.codeHash != codeHash { + p.CodeModifiedTime = codeModifiedTime + modified = true + } + if p.testHash != testHash { + p.TestModifiedTime = testModifiedTime + modified = true + } + + p.codeHash = codeHash + p.testHash = testHash + return modified +} + +func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Time, testHash string, testModifiedTime time.Time, deleted bool) { + entries, err := os.ReadDir(p.path) + + if err != nil { + deleted = true + return + } + + for _, entry := range entries { + if entry.IsDir() { + continue + } + + info, err := entry.Info() + if err != nil { + continue + } + + if goTestRegExp.Match([]byte(info.Name())) { + testHash += p.hashForFileInfo(info) + if info.ModTime().After(testModifiedTime) { + testModifiedTime = info.ModTime() + } + continue + } + + if p.watchRegExp.Match([]byte(info.Name())) { + codeHash += p.hashForFileInfo(info) + if info.ModTime().After(codeModifiedTime) { + codeModifiedTime = info.ModTime() + } + } + } + + testHash += codeHash + if codeModifiedTime.After(testModifiedTime) { + testModifiedTime = codeModifiedTime + } + + return +} + +func (p *PackageHash) hashForFileInfo(info os.FileInfo) string { + return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano()) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go new file mode 100644 index 00000000..b4892beb --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go @@ -0,0 +1,85 @@ +package watch + +import ( + "path/filepath" + "regexp" + "sync" +) + +type PackageHashes struct { + PackageHashes map[string]*PackageHash + usedPaths map[string]bool + 
watchRegExp *regexp.Regexp + lock *sync.Mutex +} + +func NewPackageHashes(watchRegExp *regexp.Regexp) *PackageHashes { + return &PackageHashes{ + PackageHashes: map[string]*PackageHash{}, + usedPaths: nil, + watchRegExp: watchRegExp, + lock: &sync.Mutex{}, + } +} + +func (p *PackageHashes) CheckForChanges() []string { + p.lock.Lock() + defer p.lock.Unlock() + + modified := []string{} + + for _, packageHash := range p.PackageHashes { + if packageHash.CheckForChanges() { + modified = append(modified, packageHash.path) + } + } + + return modified +} + +func (p *PackageHashes) Add(path string) *PackageHash { + p.lock.Lock() + defer p.lock.Unlock() + + path, _ = filepath.Abs(path) + _, ok := p.PackageHashes[path] + if !ok { + p.PackageHashes[path] = NewPackageHash(path, p.watchRegExp) + } + + if p.usedPaths != nil { + p.usedPaths[path] = true + } + return p.PackageHashes[path] +} + +func (p *PackageHashes) Get(path string) *PackageHash { + p.lock.Lock() + defer p.lock.Unlock() + + path, _ = filepath.Abs(path) + if p.usedPaths != nil { + p.usedPaths[path] = true + } + return p.PackageHashes[path] +} + +func (p *PackageHashes) StartTrackingUsage() { + p.lock.Lock() + defer p.lock.Unlock() + + p.usedPaths = map[string]bool{} +} + +func (p *PackageHashes) StopTrackingUsageAndPrune() { + p.lock.Lock() + defer p.lock.Unlock() + + for path := range p.PackageHashes { + if !p.usedPaths[path] { + delete(p.PackageHashes, path) + } + } + + p.usedPaths = nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go new file mode 100644 index 00000000..53272df7 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go @@ -0,0 +1,87 @@ +package watch + +import ( + "fmt" + "math" + "time" + + "github.com/onsi/ginkgo/v2/ginkgo/internal" +) + +type Suite struct { + Suite internal.TestSuite + RunTime time.Time + Dependencies Dependencies + + sharedPackageHashes *PackageHashes +} + +func NewSuite(suite internal.TestSuite, maxDepth int, sharedPackageHashes *PackageHashes) (*Suite, error) { + deps, err := NewDependencies(suite.Path, maxDepth) + if err != nil { + return nil, err + } + + sharedPackageHashes.Add(suite.Path) + for dep := range deps.Dependencies() { + sharedPackageHashes.Add(dep) + } + + return &Suite{ + Suite: suite, + Dependencies: deps, + + sharedPackageHashes: sharedPackageHashes, + }, nil +} + +func (s *Suite) Delta() float64 { + delta := s.delta(s.Suite.Path, true, 0) * 1000 + for dep, depth := range s.Dependencies.Dependencies() { + delta += s.delta(dep, false, depth) + } + return delta +} + +func (s *Suite) MarkAsRunAndRecomputedDependencies(maxDepth int) error { + s.RunTime = time.Now() + + deps, err := NewDependencies(s.Suite.Path, maxDepth) + if err != nil { + return err + } + + s.sharedPackageHashes.Add(s.Suite.Path) + for dep := range deps.Dependencies() { + s.sharedPackageHashes.Add(dep) + } + + s.Dependencies = deps + + return nil +} + +func (s *Suite) Description() string { + numDeps := len(s.Dependencies.Dependencies()) + pluralizer := "ies" + if numDeps == 1 { + pluralizer = "y" + } + return fmt.Sprintf("%s [%d dependenc%s]", s.Suite.Path, numDeps, pluralizer) +} + +func (s *Suite) delta(packagePath string, includeTests bool, depth int) float64 { + return math.Max(float64(s.dt(packagePath, includeTests)), 0) / float64(depth+1) +} + +func (s *Suite) dt(packagePath string, includeTests bool) time.Duration { + packageHash := s.sharedPackageHashes.Get(packagePath) + var modifiedTime time.Time + if 
includeTests { + modifiedTime = packageHash.TestModifiedTime + } else { + modifiedTime = packageHash.CodeModifiedTime + } + + return modifiedTime.Sub(s.RunTime) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go new file mode 100644 index 00000000..bde4193c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go @@ -0,0 +1,192 @@ +package watch + +import ( + "fmt" + "regexp" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/internal/interrupt_handler" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildWatchCommand() command.Command { + var suiteConfig = types.NewDefaultSuiteConfig() + var reporterConfig = types.NewDefaultReporterConfig() + var cliConfig = types.NewDefaultCLIConfig() + var goFlagsConfig = types.NewDefaultGoFlagsConfig() + + flags, err := types.BuildWatchCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig) + if err != nil { + panic(err) + } + interruptHandler := interrupt_handler.NewInterruptHandler(nil) + interrupt_handler.SwallowSigQuit() + + return command.Command{ + Name: "watch", + Flags: flags, + Usage: "ginkgo watch -- ", + ShortDoc: "Watch the passed in and runs their tests whenever changes occur.", + Documentation: "Any arguments after -- will be passed to the test.", + DocLink: "watching-for-changes", + Command: func(args []string, additionalArgs []string) { + var errors []error + cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig) + command.AbortIfErrors("Ginkgo detected configuration issues:", errors) + + watcher := &SpecWatcher{ + cliConfig: cliConfig, + goFlagsConfig: goFlagsConfig, + suiteConfig: suiteConfig, + reporterConfig: reporterConfig, + flags: flags, + + interruptHandler: interruptHandler, + } + + watcher.WatchSpecs(args, additionalArgs) + }, + } +} + +type SpecWatcher struct { + suiteConfig types.SuiteConfig + reporterConfig types.ReporterConfig + cliConfig types.CLIConfig + goFlagsConfig types.GoFlagsConfig + flags types.GinkgoFlagSet + + interruptHandler *interrupt_handler.InterruptHandler +} + +func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) { + suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + + internal.VerifyCLIAndFrameworkVersion(suites) + + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + + fmt.Printf("Identified %d test %s. 
Locating dependencies to a depth of %d (this may take a while)...\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), w.cliConfig.Depth) + deltaTracker := NewDeltaTracker(w.cliConfig.Depth, regexp.MustCompile(w.cliConfig.WatchRegExp)) + delta, errors := deltaTracker.Delta(suites) + + fmt.Printf("Watching %d %s:\n", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites))) + for _, suite := range delta.NewSuites { + fmt.Println(" " + suite.Description()) + } + + for suite, err := range errors { + fmt.Printf("Failed to watch %s: %s\n", suite.PackageName, err) + } + + if len(suites) == 1 { + w.updateSeed() + w.compileAndRun(suites[0], additionalArgs) + } + + ticker := time.NewTicker(time.Second) + + for { + select { + case <-ticker.C: + suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + delta, _ := deltaTracker.Delta(suites) + coloredStream := formatter.ColorableStdOut + + suites = internal.TestSuites{} + + if len(delta.NewSuites) > 0 { + fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected %d new %s:{{/}}", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites)))) + for _, suite := range delta.NewSuites { + suites = append(suites, suite.Suite) + fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description())) + } + } + + modifiedSuites := delta.ModifiedSuites() + if len(modifiedSuites) > 0 { + fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected changes in:{{/}}")) + for _, pkg := range delta.ModifiedPackages { + fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", pkg)) + } + fmt.Fprintln(coloredStream, formatter.F("{{green}}Will run %d %s:{{/}}", len(modifiedSuites), internal.PluralizedWord("suite", "suites", len(modifiedSuites)))) + for _, suite := range modifiedSuites { + suites = append(suites, suite.Suite) + fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description())) + } + fmt.Fprintln(coloredStream, "") + } + + if len(suites) == 0 { + break + } + + w.updateSeed() + w.computeSuccinctMode(len(suites)) + for idx := range suites { + if w.interruptHandler.Status().Interrupted() { + return + } + deltaTracker.WillRun(suites[idx]) + suites[idx] = w.compileAndRun(suites[idx], additionalArgs) + } + color := "{{green}}" + if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 { + color = "{{red}}" + } + fmt.Fprintln(coloredStream, formatter.F(color+"\nDone. 
Resuming watch...{{/}}")) + + messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, w.cliConfig, w.suiteConfig, w.reporterConfig, w.goFlagsConfig) + command.AbortIfError("could not finalize profiles:", err) + for _, message := range messages { + fmt.Println(message) + } + case <-w.interruptHandler.Status().Channel: + return + } + } +} + +func (w *SpecWatcher) compileAndRun(suite internal.TestSuite, additionalArgs []string) internal.TestSuite { + suite = internal.CompileSuite(suite, w.goFlagsConfig) + if suite.State.Is(internal.TestSuiteStateFailedToCompile) { + fmt.Println(suite.CompilationError.Error()) + return suite + } + if w.interruptHandler.Status().Interrupted() { + return suite + } + suite = internal.RunCompiledSuite(suite, w.suiteConfig, w.reporterConfig, w.cliConfig, w.goFlagsConfig, additionalArgs) + internal.Cleanup(w.goFlagsConfig, suite) + return suite +} + +func (w *SpecWatcher) computeSuccinctMode(numSuites int) { + if w.reporterConfig.Verbosity().GTE(types.VerbosityLevelVerbose) { + w.reporterConfig.Succinct = false + return + } + + if w.flags.WasSet("succinct") { + return + } + + if numSuites == 1 { + w.reporterConfig.Succinct = false + } + + if numSuites > 1 { + w.reporterConfig.Succinct = true + } +} + +func (w *SpecWatcher) updateSeed() { + if !w.flags.WasSet("seed") { + w.suiteConfig.RandomSeed = time.Now().Unix() + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo_cli_dependencies.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo_cli_dependencies.go new file mode 100644 index 00000000..85162720 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo_cli_dependencies.go @@ -0,0 +1,8 @@ +//go:build ginkgoclidependencies +// +build ginkgoclidependencies + +package ginkgo + +import ( + _ "github.com/onsi/ginkgo/v2/ginkgo" +) diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go index 1beeb114..c5a7eb94 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go @@ -7,7 +7,7 @@ GinkgoT() implements an interface analogous to *testing.T and can be used with third-party libraries that accept *testing.T through an interface. GinkgoT() takes an optional offset argument that can be used to get the -correct line number associated with the failure. 
+correct line number associated with the failure - though you do not need to use this if you call GinkgoHelper() or GinkgoT().Helper() appropriately You can learn more here: https://onsi.github.io/ginkgo/#using-third-party-libraries */ diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/group.go b/vendor/github.com/onsi/ginkgo/v2/internal/group.go index c76c3bd9..ae1b7b01 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/group.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/group.go @@ -94,15 +94,19 @@ type group struct { runOncePairs map[uint]runOncePairs runOnceTracker map[runOncePair]types.SpecState - succeeded bool + succeeded bool + failedInARunOnceBefore bool + continueOnFailure bool } func newGroup(suite *Suite) *group { return &group{ - suite: suite, - runOncePairs: map[uint]runOncePairs{}, - runOnceTracker: map[runOncePair]types.SpecState{}, - succeeded: true, + suite: suite, + runOncePairs: map[uint]runOncePairs{}, + runOnceTracker: map[runOncePair]types.SpecState{}, + succeeded: true, + failedInARunOnceBefore: false, + continueOnFailure: false, } } @@ -116,6 +120,7 @@ func (g *group) initialReportForSpec(spec Spec) types.SpecReport { LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text, LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels), ParallelProcess: g.suite.config.ParallelProcess, + RunningInParallel: g.suite.isRunningInParallel(), IsSerial: spec.Nodes.HasNodeMarkedSerial(), IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(), MaxFlakeAttempts: spec.Nodes.GetMaxFlakeAttempts(), @@ -136,10 +141,14 @@ func (g *group) evaluateSkipStatus(spec Spec) (types.SpecState, types.Failure) { if !g.suite.deadline.IsZero() && g.suite.deadline.Before(time.Now()) { return types.SpecStateSkipped, types.Failure{} } - if !g.succeeded { + if !g.succeeded && !g.continueOnFailure { return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt), "Spec skipped because an earlier spec in an ordered container failed") } + if g.failedInARunOnceBefore && g.continueOnFailure { + return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt), + "Spec skipped because a BeforeAll node failed") + } beforeOncePairs := g.runOncePairs[spec.SubjectID()].withType(types.NodeTypeBeforeAll | types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach) for _, pair := range beforeOncePairs { if g.runOnceTracker[pair].Is(types.SpecStateSkipped) { @@ -167,7 +176,8 @@ func (g *group) isLastSpecWithPair(specID uint, pair runOncePair) bool { return lastSpecID == specID } -func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) { +func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) bool { + failedInARunOnceBefore := false pairs := g.runOncePairs[spec.SubjectID()] nodes := spec.Nodes.WithType(types.NodeTypeBeforeAll) @@ -193,6 +203,7 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) { } if g.suite.currentSpecReport.State != types.SpecStatePassed { terminatingNode, terminatingPair = node, oncePair + failedInARunOnceBefore = !terminatingPair.isZero() break } } @@ -215,7 +226,7 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) { //this node has already been run on this attempt, don't rerun it return false } - pair := runOncePair{} + var pair runOncePair switch node.NodeType { case types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll: // check if we were generated in an AfterNode that has already run @@ -245,9 +256,13 @@ func (g *group) 
attemptSpec(isFinalAttempt bool, spec Spec) { if !terminatingPair.isZero() && terminatingNode.NestingLevel == node.NestingLevel { return true //...or, a run-once node at our nesting level was skipped which means this is our last chance to run } - case types.SpecStateFailed, types.SpecStatePanicked: // the spec has failed... + case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateTimedout: // the spec has failed... if isFinalAttempt { - return true //...if this was the last attempt then we're the last spec to run and so the AfterNode should run + if g.continueOnFailure { + return isLastSpecWithPair || failedInARunOnceBefore //...we're configured to continue on failures - so we should only run if we're the last spec for this pair or if we failed in a runOnceBefore (which means we _are_ the last spec to run) + } else { + return true //...this was the last attempt and continueOnFailure is false therefore we are the last spec to run and so the AfterNode should run + } } if !terminatingPair.isZero() { // ...and it failed in a run-once. which will be running again if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll) { @@ -280,10 +295,12 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) { includeDeferCleanups = true } + return failedInARunOnceBefore } func (g *group) run(specs Specs) { g.specs = specs + g.continueOnFailure = specs[0].Nodes.FirstNodeMarkedOrdered().MarkedContinueOnFailure for _, spec := range g.specs { g.runOncePairs[spec.SubjectID()] = runOncePairsForSpec(spec) } @@ -300,8 +317,8 @@ func (g *group) run(specs Specs) { skip := g.suite.config.DryRun || g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates|types.SpecStateSkipped|types.SpecStatePending) g.suite.currentSpecReport.StartTime = time.Now() + failedInARunOnceBefore := false if !skip { - var maxAttempts = 1 if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 { @@ -319,14 +336,14 @@ func (g *group) run(specs Specs) { g.suite.outputInterceptor.StartInterceptingOutput() if attempt > 0 { if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 { - fmt.Fprintf(g.suite.writer, "\nGinkgo: Attempt #%d Passed. Repeating...\n", attempt) + g.suite.handleSpecEvent(types.SpecEvent{SpecEventType: types.SpecEventSpecRepeat, Attempt: attempt}) } if g.suite.currentSpecReport.MaxFlakeAttempts > 0 { - fmt.Fprintf(g.suite.writer, "\nGinkgo: Attempt #%d Failed. 
Retrying...\n", attempt) + g.suite.handleSpecEvent(types.SpecEvent{SpecEventType: types.SpecEventSpecRetry, Attempt: attempt}) } } - g.attemptSpec(attempt == maxAttempts-1, spec) + failedInARunOnceBefore = g.attemptSpec(attempt == maxAttempts-1, spec) g.suite.currentSpecReport.EndTime = time.Now() g.suite.currentSpecReport.RunTime = g.suite.currentSpecReport.EndTime.Sub(g.suite.currentSpecReport.StartTime) @@ -341,6 +358,10 @@ func (g *group) run(specs Specs) { if g.suite.currentSpecReport.MaxFlakeAttempts > 0 { if g.suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) { break + } else if attempt < maxAttempts-1 { + af := types.AdditionalFailure{State: g.suite.currentSpecReport.State, Failure: g.suite.currentSpecReport.Failure} + af.Failure.Message = fmt.Sprintf("Failure recorded during attempt %d:\n%s", attempt+1, af.Failure.Message) + g.suite.currentSpecReport.AdditionalFailures = append(g.suite.currentSpecReport.AdditionalFailures, af) } } } @@ -350,6 +371,7 @@ func (g *group) run(specs Specs) { g.suite.processCurrentSpecReport() if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { g.succeeded = false + g.failedInARunOnceBefore = g.failedInARunOnceBefore || failedInARunOnceBefore } g.suite.selectiveLock.Lock() g.suite.currentSpecReport = types.SpecReport{} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/vendor/github.com/onsi/ginkgo/v2/internal/node.go index 9eb835e9..0869bffb 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/node.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/node.go @@ -44,23 +44,23 @@ type Node struct { SynchronizedAfterSuiteProc1Body func(SpecContext) SynchronizedAfterSuiteProc1BodyHasContext bool - ReportEachBody func(types.SpecReport) - ReportAfterSuiteBody func(types.Report) - - MarkedFocus bool - MarkedPending bool - MarkedSerial bool - MarkedOrdered bool - MarkedOncePerOrdered bool - MarkedSuppressProgressReporting bool - FlakeAttempts int - MustPassRepeatedly int - Labels Labels - PollProgressAfter time.Duration - PollProgressInterval time.Duration - NodeTimeout time.Duration - SpecTimeout time.Duration - GracePeriod time.Duration + ReportEachBody func(types.SpecReport) + ReportSuiteBody func(types.Report) + + MarkedFocus bool + MarkedPending bool + MarkedSerial bool + MarkedOrdered bool + MarkedContinueOnFailure bool + MarkedOncePerOrdered bool + FlakeAttempts int + MustPassRepeatedly int + Labels Labels + PollProgressAfter time.Duration + PollProgressInterval time.Duration + NodeTimeout time.Duration + SpecTimeout time.Duration + GracePeriod time.Duration NodeIDWhereCleanupWasGenerated uint } @@ -70,6 +70,7 @@ type focusType bool type pendingType bool type serialType bool type orderedType bool +type continueOnFailureType bool type honorsOrderedType bool type suppressProgressReporting bool @@ -77,6 +78,7 @@ const Focus = focusType(true) const Pending = pendingType(true) const Serial = serialType(true) const Ordered = orderedType(true) +const ContinueOnFailure = continueOnFailureType(true) const OncePerOrdered = honorsOrderedType(true) const SuppressProgressReporting = suppressProgressReporting(true) @@ -91,6 +93,10 @@ type NodeTimeout time.Duration type SpecTimeout time.Duration type GracePeriod time.Duration +func (l Labels) MatchesLabelFilter(query string) bool { + return types.MustParseLabelFilter(query)(l) +} + func UnionOfLabels(labels ...Labels) Labels { out := Labels{} seen := map[string]bool{} @@ -134,6 +140,8 @@ func 
isDecoration(arg interface{}) bool { return true case t == reflect.TypeOf(Ordered): return true + case t == reflect.TypeOf(ContinueOnFailure): + return true case t == reflect.TypeOf(OncePerOrdered): return true case t == reflect.TypeOf(SuppressProgressReporting): @@ -242,16 +250,18 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy if !nodeType.Is(types.NodeTypeContainer) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Ordered")) } + case t == reflect.TypeOf(ContinueOnFailure): + node.MarkedContinueOnFailure = bool(arg.(continueOnFailureType)) + if !nodeType.Is(types.NodeTypeContainer) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "ContinueOnFailure")) + } case t == reflect.TypeOf(OncePerOrdered): node.MarkedOncePerOrdered = bool(arg.(honorsOrderedType)) if !nodeType.Is(types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach | types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "OncePerOrdered")) } case t == reflect.TypeOf(SuppressProgressReporting): - node.MarkedSuppressProgressReporting = bool(arg.(suppressProgressReporting)) - if nodeType.Is(types.NodeTypeContainer) { - appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "SuppressProgressReporting")) - } + deprecationTracker.TrackDeprecation(types.Deprecations.SuppressProgressReporting()) case t == reflect.TypeOf(FlakeAttempts(0)): node.FlakeAttempts = int(arg.(FlakeAttempts)) if !nodeType.Is(types.NodeTypesForContainerAndIt) { @@ -321,9 +331,9 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy trackedFunctionError = true break } - } else if nodeType.Is(types.NodeTypeReportAfterSuite) { - if node.ReportAfterSuiteBody == nil { - node.ReportAfterSuiteBody = arg.(func(types.Report)) + } else if nodeType.Is(types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) { + if node.ReportSuiteBody == nil { + node.ReportSuiteBody = arg.(func(types.Report)) } else { appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) trackedFunctionError = true @@ -390,13 +400,17 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType)) } + if node.MarkedContinueOnFailure && !node.MarkedOrdered { + appendError(types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation)) + } + hasContext := node.HasContext || node.SynchronizedAfterSuiteProc1BodyHasContext || node.SynchronizedAfterSuiteAllProcsBodyHasContext || node.SynchronizedBeforeSuiteProc1BodyHasContext || node.SynchronizedBeforeSuiteAllProcsBodyHasContext if !hasContext && (node.NodeTimeout > 0 || node.SpecTimeout > 0 || node.GracePeriod > 0) && len(errors) == 0 { appendError(types.GinkgoErrors.InvalidTimeoutOrGracePeriodForNonContextNode(node.CodeLocation, nodeType)) } - if !node.NodeType.Is(types.NodeTypeReportBeforeEach|types.NodeTypeReportAfterEach|types.NodeTypeSynchronizedBeforeSuite|types.NodeTypeSynchronizedAfterSuite|types.NodeTypeReportAfterSuite) && node.Body == nil && !node.MarkedPending && !trackedFunctionError { + if !node.NodeType.Is(types.NodeTypeReportBeforeEach|types.NodeTypeReportAfterEach|types.NodeTypeSynchronizedBeforeSuite|types.NodeTypeSynchronizedAfterSuite|types.NodeTypeReportBeforeSuite|types.NodeTypeReportAfterSuite) && node.Body 
== nil && !node.MarkedPending && !trackedFunctionError {
 		appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType))
 	}

diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go b/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go
index 161be820..7ed43c7f 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go
@@ -7,6 +7,58 @@ import (
 	"github.com/onsi/ginkgo/v2/types"
 )

+type SortableSpecs struct {
+	Specs   Specs
+	Indexes []int
+}
+
+func NewSortableSpecs(specs Specs) *SortableSpecs {
+	indexes := make([]int, len(specs))
+	for i := range specs {
+		indexes[i] = i
+	}
+	return &SortableSpecs{
+		Specs:   specs,
+		Indexes: indexes,
+	}
+}
+func (s *SortableSpecs) Len() int { return len(s.Indexes) }
+func (s *SortableSpecs) Swap(i, j int) { s.Indexes[i], s.Indexes[j] = s.Indexes[j], s.Indexes[i] }
+func (s *SortableSpecs) Less(i, j int) bool {
+	a, b := s.Specs[s.Indexes[i]], s.Specs[s.Indexes[j]]
+
+	firstOrderedA := a.Nodes.FirstNodeMarkedOrdered()
+	firstOrderedB := b.Nodes.FirstNodeMarkedOrdered()
+	if firstOrderedA.ID == firstOrderedB.ID && !firstOrderedA.IsZero() {
+		// strictly preserve order in ordered containers. ID will track this as IDs are generated monotonically
+		return a.FirstNodeWithType(types.NodeTypeIt).ID < b.FirstNodeWithType(types.NodeTypeIt).ID
+	}
+
+	aCLs := a.Nodes.WithType(types.NodeTypesForContainerAndIt).CodeLocations()
+	bCLs := b.Nodes.WithType(types.NodeTypesForContainerAndIt).CodeLocations()
+	for i := 0; i < len(aCLs) && i < len(bCLs); i++ {
+		aCL, bCL := aCLs[i], bCLs[i]
+		if aCL.FileName < bCL.FileName {
+			return true
+		} else if aCL.FileName > bCL.FileName {
+			return false
+		}
+		if aCL.LineNumber < bCL.LineNumber {
+			return true
+		} else if aCL.LineNumber > bCL.LineNumber {
+			return false
+		}
+	}
+	// either everything is equal or we have different lengths of CLs
+	if len(aCLs) < len(bCLs) {
+		return true
+	} else if len(aCLs) > len(bCLs) {
+		return false
+	}
+	// ok, now we are sure everything was equal. so we use the spec text to break ties
+	return a.Text() < b.Text()
+}
+
 type GroupedSpecIndices []SpecIndices
 type SpecIndices []int

@@ -28,12 +80,17 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices,
 	// Seed a new random source based on the configured random seed.
 	r := rand.New(rand.NewSource(suiteConfig.RandomSeed))

-	// first break things into execution groups
+	// first, we sort the entire suite to ensure a deterministic order. the sort is performed by filename, then line number, and then spec text. this ensures every parallel process has the exact same spec order and is only necessary to cover the edge case where the user iterates over a map to generate specs.
+	sortableSpecs := NewSortableSpecs(specs)
+	sort.Sort(sortableSpecs)
+
+	// then we break things into execution groups
 	// a group represents a single unit of execution and is a collection of SpecIndices
 	// usually a group is just a single spec, however ordered containers must be preserved as a single group
 	executionGroupIDs := []uint{}
 	executionGroups := map[uint]SpecIndices{}
-	for idx, spec := range specs {
+	for _, idx := range sortableSpecs.Indexes {
+		spec := specs[idx]
 		groupNode := spec.Nodes.FirstNodeMarkedOrdered()
 		if groupNode.IsZero() {
 			groupNode = spec.Nodes.FirstNodeWithType(types.NodeTypeIt)
@@ -48,7 +105,6 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices,
 	// we shuffle outermost containers. 
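The `SortableSpecs` type added above is the index-indirection sorting pattern: `sort.Sort` permutes a slice of indexes while the backing `specs` slice stays put, so every parallel process derives the same deterministic order without disturbing the specs themselves. A minimal, self-contained sketch of the pattern follows; the `spec` struct and its fields are invented for illustration and are far simpler than Ginkgo's real `Specs` type.

```
package main

import (
	"fmt"
	"sort"
)

// spec is a simplified stand-in for Ginkgo's Spec type.
type spec struct {
	FileName   string
	LineNumber int
	Text       string
}

// sortableSpecs sorts indexes, not the specs themselves, mirroring
// SortableSpecs above: file name first, then line number, then text.
type sortableSpecs struct {
	specs   []spec
	indexes []int
}

func (s *sortableSpecs) Len() int      { return len(s.indexes) }
func (s *sortableSpecs) Swap(i, j int) { s.indexes[i], s.indexes[j] = s.indexes[j], s.indexes[i] }
func (s *sortableSpecs) Less(i, j int) bool {
	a, b := s.specs[s.indexes[i]], s.specs[s.indexes[j]]
	if a.FileName != b.FileName {
		return a.FileName < b.FileName
	}
	if a.LineNumber != b.LineNumber {
		return a.LineNumber < b.LineNumber
	}
	return a.Text < b.Text // spec text breaks any remaining ties
}

func main() {
	specs := []spec{
		{"b_test.go", 10, "beta"},
		{"a_test.go", 20, "alpha"},
		{"a_test.go", 20, "aardvark"},
	}
	s := &sortableSpecs{specs: specs, indexes: []int{0, 1, 2}}
	sort.Sort(s)
	for _, idx := range s.indexes {
		// specs keeps its original positions; indexes carries the order.
		fmt.Println(specs[idx].FileName, specs[idx].LineNumber, specs[idx].Text)
	}
}
```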
so we need to form shufflable groupings of GroupIDs shufflableGroupingIDs := []uint{} shufflableGroupingIDToGroupIDs := map[uint][]uint{} - shufflableGroupingsIDToSortKeys := map[uint]string{} // for each execution group we're going to have to pick a node to represent how the // execution group is grouped for shuffling: @@ -57,7 +113,7 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, nodeTypesToShuffle = types.NodeTypeIt } - //so, fo reach execution group: + //so, for each execution group: for _, groupID := range executionGroupIDs { // pick out a representative spec representativeSpec := specs[executionGroups[groupID][0]] @@ -72,22 +128,9 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, if len(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID]) == 1 { // record the shuffleable group ID shufflableGroupingIDs = append(shufflableGroupingIDs, shufflableGroupingNode.ID) - // and record the sort key to use - shufflableGroupingsIDToSortKeys[shufflableGroupingNode.ID] = shufflableGroupingNode.CodeLocation.String() } } - // now we sort the shufflable groups by the sort key. We use the shufflable group nodes code location and break ties using its node id - sort.SliceStable(shufflableGroupingIDs, func(i, j int) bool { - keyA := shufflableGroupingsIDToSortKeys[shufflableGroupingIDs[i]] - keyB := shufflableGroupingsIDToSortKeys[shufflableGroupingIDs[j]] - if keyA == keyB { - return shufflableGroupingIDs[i] < shufflableGroupingIDs[j] - } else { - return keyA < keyB - } - }) - // now we permute the sorted shufflable grouping IDs and build the ordered Groups orderedGroups := GroupedSpecIndices{} permutation := r.Perm(len(shufflableGroupingIDs)) diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go index b417bf5b..b3cd6429 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go @@ -42,6 +42,8 @@ type Client interface { PostSuiteWillBegin(report types.Report) error PostDidRun(report types.SpecReport) error PostSuiteDidEnd(report types.Report) error + PostReportBeforeSuiteCompleted(state types.SpecState) error + BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) BlockUntilNonprimaryProcsHaveFinished() error diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go index ad9932f2..6547c7a6 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go @@ -98,6 +98,19 @@ func (client *httpClient) PostEmitProgressReport(report types.ProgressReport) er return client.post("/progress-report", report) } +func (client *httpClient) PostReportBeforeSuiteCompleted(state types.SpecState) error { + return client.post("/report-before-suite-completed", state) +} + +func (client *httpClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) { + var state types.SpecState + err := client.poll("/report-before-suite-state", &state) + if err == ErrorGone { + return types.SpecStateFailed, nil + } + return state, err +} + func (client *httpClient) 
PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error {
 	beforeSuiteState := BeforeSuiteState{
 		State: state,

diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go
index fa3ac682..d2c71ab1 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go
@@ -26,7 +26,7 @@ type httpServer struct {
 	handler *ServerHandler
 }

-//Create a new server, automatically selecting a port
+// Create a new server, automatically selecting a port
 func newHttpServer(parallelTotal int, reporter reporters.Reporter) (*httpServer, error) {
 	listener, err := net.Listen("tcp", "127.0.0.1:0")
 	if err != nil {
@@ -38,7 +38,7 @@ func newHttpServer(parallelTotal int, reporter reporters.Reporter) (*httpServer,
 	}, nil
 }

-//Start the server. You don't need to `go s.Start()`, just `s.Start()`
+// Start the server. You don't need to `go s.Start()`, just `s.Start()`
 func (server *httpServer) Start() {
 	httpServer := &http.Server{}
 	mux := http.NewServeMux()
@@ -52,6 +52,8 @@ func (server *httpServer) Start() {
 	mux.HandleFunc("/progress-report", server.emitProgressReport)

 	//synchronization endpoints
+	mux.HandleFunc("/report-before-suite-completed", server.handleReportBeforeSuiteCompleted)
+	mux.HandleFunc("/report-before-suite-state", server.handleReportBeforeSuiteState)
 	mux.HandleFunc("/before-suite-completed", server.handleBeforeSuiteCompleted)
 	mux.HandleFunc("/before-suite-state", server.handleBeforeSuiteState)
 	mux.HandleFunc("/have-nonprimary-procs-finished", server.handleHaveNonprimaryProcsFinished)
@@ -63,12 +65,12 @@ func (server *httpServer) Start() {
 	go httpServer.Serve(server.listener)
 }

-//Stop the server
+// Stop the server
 func (server *httpServer) Close() {
 	server.listener.Close()
 }

-//The address the server can be reached it. Pass this into the `ForwardingReporter`.
+// The address the server can be reached at. Pass this into the `ForwardingReporter`.
func (server *httpServer) Address() string { return "http://" + server.listener.Addr().String() } @@ -93,7 +95,7 @@ func (server *httpServer) RegisterAlive(node int, alive func() bool) { // Streaming Endpoints // -//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters` +// The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters` func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object interface{}) bool { defer request.Body.Close() if json.NewDecoder(request.Body).Decode(object) != nil { @@ -164,6 +166,23 @@ func (server *httpServer) emitProgressReport(writer http.ResponseWriter, request server.handleError(server.handler.EmitProgressReport(report, voidReceiver), writer) } +func (server *httpServer) handleReportBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) { + var state types.SpecState + if !server.decode(writer, request, &state) { + return + } + + server.handleError(server.handler.ReportBeforeSuiteCompleted(state, voidReceiver), writer) +} + +func (server *httpServer) handleReportBeforeSuiteState(writer http.ResponseWriter, request *http.Request) { + var state types.SpecState + if server.handleError(server.handler.ReportBeforeSuiteState(voidSender, &state), writer) { + return + } + json.NewEncoder(writer).Encode(state) +} + func (server *httpServer) handleBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) { var beforeSuiteState BeforeSuiteState if !server.decode(writer, request, &beforeSuiteState) { diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go index fe93cc2b..59e8e6fd 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go @@ -76,6 +76,19 @@ func (client *rpcClient) PostEmitProgressReport(report types.ProgressReport) err return client.client.Call("Server.EmitProgressReport", report, voidReceiver) } +func (client *rpcClient) PostReportBeforeSuiteCompleted(state types.SpecState) error { + return client.client.Call("Server.ReportBeforeSuiteCompleted", state, voidReceiver) +} + +func (client *rpcClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) { + var state types.SpecState + err := client.poll("Server.ReportBeforeSuiteState", &state) + if err == ErrorGone { + return types.SpecStateFailed, nil + } + return state, err +} + func (client *rpcClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error { beforeSuiteState := BeforeSuiteState{ State: state, diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go index 7c6e67b9..a6d98793 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go @@ -18,16 +18,17 @@ var voidSender Void // It handles all the business logic to avoid duplication between the two servers type ServerHandler struct { - done chan interface{} - outputDestination io.Writer - reporter reporters.Reporter - alives []func() bool - lock *sync.Mutex - beforeSuiteState BeforeSuiteState - parallelTotal int - counter int - counterLock *sync.Mutex - shouldAbort bool + done chan interface{} + outputDestination io.Writer + 
reporter reporters.Reporter + alives []func() bool + lock *sync.Mutex + beforeSuiteState BeforeSuiteState + reportBeforeSuiteState types.SpecState + parallelTotal int + counter int + counterLock *sync.Mutex + shouldAbort bool numSuiteDidBegins int numSuiteDidEnds int @@ -37,11 +38,12 @@ type ServerHandler struct { func newServerHandler(parallelTotal int, reporter reporters.Reporter) *ServerHandler { return &ServerHandler{ - reporter: reporter, - lock: &sync.Mutex{}, - counterLock: &sync.Mutex{}, - alives: make([]func() bool, parallelTotal), - beforeSuiteState: BeforeSuiteState{Data: nil, State: types.SpecStateInvalid}, + reporter: reporter, + lock: &sync.Mutex{}, + counterLock: &sync.Mutex{}, + alives: make([]func() bool, parallelTotal), + beforeSuiteState: BeforeSuiteState{Data: nil, State: types.SpecStateInvalid}, + parallelTotal: parallelTotal, outputDestination: os.Stdout, done: make(chan interface{}), @@ -140,6 +142,29 @@ func (handler *ServerHandler) haveNonprimaryProcsFinished() bool { return true } +func (handler *ServerHandler) ReportBeforeSuiteCompleted(reportBeforeSuiteState types.SpecState, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.reportBeforeSuiteState = reportBeforeSuiteState + + return nil +} + +func (handler *ServerHandler) ReportBeforeSuiteState(_ Void, reportBeforeSuiteState *types.SpecState) error { + proc1IsAlive := handler.procIsAlive(1) + handler.lock.Lock() + defer handler.lock.Unlock() + if handler.reportBeforeSuiteState == types.SpecStateInvalid { + if proc1IsAlive { + return ErrorEarly + } else { + return ErrorGone + } + } + *reportBeforeSuiteState = handler.reportBeforeSuiteState + return nil +} + func (handler *ServerHandler) BeforeSuiteCompleted(beforeSuiteState BeforeSuiteState, _ *Void) error { handler.lock.Lock() defer handler.lock.Unlock() diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go index 345db544..11269cf1 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go @@ -48,13 +48,10 @@ type ProgressStepCursor struct { StartTime time.Time } -func NewProgressReport(isRunningInParallel bool, report types.SpecReport, currentNode Node, currentNodeStartTime time.Time, currentStep ProgressStepCursor, gwOutput string, additionalReports []string, sourceRoots []string, includeAll bool) (types.ProgressReport, error) { +func NewProgressReport(isRunningInParallel bool, report types.SpecReport, currentNode Node, currentNodeStartTime time.Time, currentStep types.SpecEvent, gwOutput string, timelineLocation types.TimelineLocation, additionalReports []string, sourceRoots []string, includeAll bool) (types.ProgressReport, error) { pr := types.ProgressReport{ - ParallelProcess: report.ParallelProcess, - RunningInParallel: isRunningInParallel, - - Time: time.Now(), - + ParallelProcess: report.ParallelProcess, + RunningInParallel: isRunningInParallel, ContainerHierarchyTexts: report.ContainerHierarchyTexts, LeafNodeText: report.LeafNodeText, LeafNodeLocation: report.LeafNodeLocation, @@ -65,14 +62,14 @@ func NewProgressReport(isRunningInParallel bool, report types.SpecReport, curren CurrentNodeLocation: currentNode.CodeLocation, CurrentNodeStartTime: currentNodeStartTime, - CurrentStepText: currentStep.Text, + CurrentStepText: currentStep.Message, CurrentStepLocation: currentStep.CodeLocation, - CurrentStepStartTime: currentStep.StartTime, + CurrentStepStartTime: 
currentStep.TimelineLocation.Time, AdditionalReports: additionalReports, CapturedGinkgoWriterOutput: gwOutput, - GinkgoWriterOffset: len(gwOutput), + TimelineLocation: timelineLocation, } goroutines, err := extractRunningGoroutines() @@ -186,7 +183,6 @@ func extractRunningGoroutines() ([]types.Goroutine, error) { break } } - r := bufio.NewReader(bytes.NewReader(stack)) out := []types.Goroutine{} idx := -1 @@ -234,12 +230,12 @@ func extractRunningGoroutines() ([]types.Goroutine, error) { return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call: %s -- missing file name and line number", functionCall.Function)) } line = strings.TrimLeft(line, " \t") - fields := strings.SplitN(line, ":", 2) - if len(fields) != 2 { - return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid filename nad line number: %s", line)) + delimiterIdx := strings.LastIndex(line, ":") + if delimiterIdx == -1 { + return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid filename and line number: %s", line)) } - functionCall.Filename = fields[0] - line = strings.Split(fields[1], " ")[0] + functionCall.Filename = line[:delimiterIdx] + line = strings.Split(line[delimiterIdx+1:], " ")[0] lineNumber, err := strconv.ParseInt(line, 10, 64) functionCall.Line = int(lineNumber) if err != nil { diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go b/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go index 74199f39..cc351a39 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go @@ -1,7 +1,6 @@ package internal import ( - "reflect" "time" "github.com/onsi/ginkgo/v2/types" @@ -13,20 +12,20 @@ func NewReportEntry(name string, cl types.CodeLocation, args ...interface{}) (Re out := ReportEntry{ Visibility: types.ReportEntryVisibilityAlways, Name: name, - Time: time.Now(), Location: cl, + Time: time.Now(), } var didSetValue = false for _, arg := range args { - switch reflect.TypeOf(arg) { - case reflect.TypeOf(types.ReportEntryVisibilityAlways): - out.Visibility = arg.(types.ReportEntryVisibility) - case reflect.TypeOf(types.CodeLocation{}): - out.Location = arg.(types.CodeLocation) - case reflect.TypeOf(Offset(0)): - out.Location = types.NewCodeLocation(2 + int(arg.(Offset))) - case reflect.TypeOf(out.Time): - out.Time = arg.(time.Time) + switch x := arg.(type) { + case types.ReportEntryVisibility: + out.Visibility = x + case types.CodeLocation: + out.Location = x + case Offset: + out.Location = types.NewCodeLocation(2 + int(x)) + case time.Time: + out.Time = x default: if didSetValue { return ReportEntry{}, types.GinkgoErrors.TooManyReportEntryValues(out.Location, arg) diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go index 432bd217..5ddf1007 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go @@ -44,7 +44,8 @@ type Suite struct { currentSpecContext *specContext - progressStepCursor ProgressStepCursor + currentByStep types.SpecEvent + timelineOrder int /* We don't need to lock around all operations. Just those that *could* happen concurrently. 
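The `report_entry.go` hunk above swaps a `reflect.TypeOf` switch for a plain type switch when decoding `NewReportEntry`'s variadic arguments; `switch x := arg.(type)` binds the already-asserted concrete value directly, so no per-case assertion or reflection is needed. A minimal sketch of that decoding pattern, with invented option types standing in for Ginkgo's:

```
package main

import (
	"fmt"
	"time"
)

// Visibility and Offset are illustrative option types, not Ginkgo's.
type Visibility int
type Offset int

type Entry struct {
	Visibility Visibility
	Offset     Offset
	Time       time.Time
}

// newEntry decodes its variadic options with a type switch; each case
// receives the concrete value in x without a separate assertion.
func newEntry(args ...interface{}) (Entry, error) {
	out := Entry{Time: time.Now()}
	for _, arg := range args {
		switch x := arg.(type) {
		case Visibility:
			out.Visibility = x
		case Offset:
			out.Offset = x
		case time.Time:
			out.Time = x
		default:
			return Entry{}, fmt.Errorf("unsupported option: %v", arg)
		}
	}
	return out, nil
}

func main() {
	e, err := newEntry(Visibility(1), Offset(2))
	fmt.Println(e, err)
}
```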
@@ -128,7 +129,7 @@ func (suite *Suite) PushNode(node Node) error {
 		return suite.pushCleanupNode(node)
 	}

-	if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeReportAfterSuite) {
+	if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) {
 		return suite.pushSuiteNode(node)
 	}

@@ -150,6 +151,13 @@ func (suite *Suite) PushNode(node Node) error {
 		}
 	}

+	if node.MarkedContinueOnFailure {
+		firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
+		if !firstOrderedNode.IsZero() {
+			return types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation)
+		}
+	}
+
 	if node.NodeType == types.NodeTypeContainer {
 		// During PhaseBuildTopLevel we only track the top level containers without entering them
 		// We only enter the top level container nodes during PhaseBuildTree
@@ -221,7 +229,7 @@ func (suite *Suite) pushCleanupNode(node Node) error {
 		node.NodeType = types.NodeTypeCleanupAfterSuite
 	case types.NodeTypeBeforeAll, types.NodeTypeAfterAll:
 		node.NodeType = types.NodeTypeCleanupAfterAll
-	case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportAfterSuite:
+	case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportBeforeSuite, types.NodeTypeReportAfterSuite:
 		return types.GinkgoErrors.PushingCleanupInReportingNode(node.CodeLocation, suite.currentNode.NodeType)
 	case types.NodeTypeCleanupInvalid, types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll, types.NodeTypeCleanupAfterSuite:
 		return types.GinkgoErrors.PushingCleanupInCleanupNode(node.CodeLocation)
@@ -236,19 +244,69 @@ func (suite *Suite) pushCleanupNode(node Node) error {
 	return nil
 }

-/*
-  Pushing and popping the Step Cursor stack
-*/
-
-func (suite *Suite) SetProgressStepCursor(cursor ProgressStepCursor) {
+func (suite *Suite) generateTimelineLocation() types.TimelineLocation {
 	suite.selectiveLock.Lock()
 	defer suite.selectiveLock.Unlock()

-	suite.progressStepCursor = cursor
+	suite.timelineOrder += 1
+	return types.TimelineLocation{
+		Offset: len(suite.currentSpecReport.CapturedGinkgoWriterOutput) + suite.writer.Len(),
+		Order:  suite.timelineOrder,
+		Time:   time.Now(),
+	}
+}
+
+func (suite *Suite) handleSpecEvent(event types.SpecEvent) types.SpecEvent {
+	event.TimelineLocation = suite.generateTimelineLocation()
+	suite.selectiveLock.Lock()
+	suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event)
+	suite.selectiveLock.Unlock()
+	suite.reporter.EmitSpecEvent(event)
+	return event
+}
+
+func (suite *Suite) handleSpecEventEnd(eventType types.SpecEventType, startEvent types.SpecEvent) {
+	event := startEvent
+	event.SpecEventType = eventType
+	event.TimelineLocation = suite.generateTimelineLocation()
+	event.Duration = event.TimelineLocation.Time.Sub(startEvent.TimelineLocation.Time)
+	suite.selectiveLock.Lock()
+	suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event)
+	suite.selectiveLock.Unlock()
+	suite.reporter.EmitSpecEvent(event)
+}
+
+func (suite *Suite) By(text string, callback ...func()) error {
+	cl := types.NewCodeLocation(2)
+	if suite.phase != PhaseRun {
+		return types.GinkgoErrors.ByNotDuringRunPhase(cl)
+	}
+
+	event := suite.handleSpecEvent(types.SpecEvent{
+		SpecEventType: 
types.SpecEventByStart, + CodeLocation: cl, + Message: text, + }) + suite.selectiveLock.Lock() + suite.currentByStep = event + suite.selectiveLock.Unlock() + + if len(callback) == 1 { + defer func() { + suite.selectiveLock.Lock() + suite.currentByStep = types.SpecEvent{} + suite.selectiveLock.Unlock() + suite.handleSpecEventEnd(types.SpecEventByEnd, event) + }() + callback[0]() + } else if len(callback) > 1 { + panic("just one callback per By, please") + } + return nil } /* - Spec Running methods - used during PhaseRun +Spec Running methods - used during PhaseRun */ func (suite *Suite) CurrentSpecReport() types.SpecReport { suite.selectiveLock.Lock() @@ -263,16 +321,20 @@ func (suite *Suite) CurrentSpecReport() types.SpecReport { } func (suite *Suite) AddReportEntry(entry ReportEntry) error { - suite.selectiveLock.Lock() - defer suite.selectiveLock.Unlock() if suite.phase != PhaseRun { return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location) } + entry.TimelineLocation = suite.generateTimelineLocation() + entry.Time = entry.TimelineLocation.Time + suite.selectiveLock.Lock() suite.currentSpecReport.ReportEntries = append(suite.currentSpecReport.ReportEntries, entry) + suite.selectiveLock.Unlock() + suite.reporter.EmitReportEntry(entry) return nil } func (suite *Suite) generateProgressReport(fullReport bool) types.ProgressReport { + timelineLocation := suite.generateTimelineLocation() suite.selectiveLock.Lock() defer suite.selectiveLock.Unlock() @@ -280,10 +342,8 @@ func (suite *Suite) generateProgressReport(fullReport bool) types.ProgressReport if suite.currentSpecContext != nil { additionalReports = suite.currentSpecContext.QueryProgressReporters() } - stepCursor := suite.progressStepCursor - gwOutput := suite.currentSpecReport.CapturedGinkgoWriterOutput + string(suite.writer.Bytes()) - pr, err := NewProgressReport(suite.isRunningInParallel(), suite.currentSpecReport, suite.currentNode, suite.currentNodeStartTime, stepCursor, gwOutput, additionalReports, suite.config.SourceRoots, fullReport) + pr, err := NewProgressReport(suite.isRunningInParallel(), suite.currentSpecReport, suite.currentNode, suite.currentNodeStartTime, suite.currentByStep, gwOutput, timelineLocation, additionalReports, suite.config.SourceRoots, fullReport) if err != nil { fmt.Printf("{{red}}Failed to generate progress report:{{/}}\n%s\n", err.Error()) @@ -355,7 +415,13 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s } suite.report.SuiteSucceeded = true - suite.runBeforeSuite(numSpecsThatWillBeRun) + + suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportBeforeSuite) + + ranBeforeSuite := suite.report.SuiteSucceeded + if suite.report.SuiteSucceeded { + suite.runBeforeSuite(numSpecsThatWillBeRun) + } if suite.report.SuiteSucceeded { groupedSpecIndices, serialGroupedSpecIndices := OrderSpecs(specs, suite.config) @@ -394,7 +460,9 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s } } - suite.runAfterSuiteCleanup(numSpecsThatWillBeRun) + if ranBeforeSuite { + suite.runAfterSuiteCleanup(numSpecsThatWillBeRun) + } interruptStatus := suite.interruptHandler.Status() if interruptStatus.Interrupted() { @@ -408,9 +476,7 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s suite.report.SuiteSucceeded = false } - if suite.config.ParallelProcess == 1 { - suite.runReportAfterSuite() - } + suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportAfterSuite) suite.reporter.SuiteDidEnd(suite.report) if 
suite.isRunningInParallel() { suite.client.PostSuiteDidEnd(suite.report) @@ -424,9 +490,10 @@ func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) { if !beforeSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 { suite.selectiveLock.Lock() suite.currentSpecReport = types.SpecReport{ - LeafNodeType: beforeSuiteNode.NodeType, - LeafNodeLocation: beforeSuiteNode.CodeLocation, - ParallelProcess: suite.config.ParallelProcess, + LeafNodeType: beforeSuiteNode.NodeType, + LeafNodeLocation: beforeSuiteNode.CodeLocation, + ParallelProcess: suite.config.ParallelProcess, + RunningInParallel: suite.isRunningInParallel(), } suite.selectiveLock.Unlock() @@ -445,9 +512,10 @@ func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) { if !afterSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 { suite.selectiveLock.Lock() suite.currentSpecReport = types.SpecReport{ - LeafNodeType: afterSuiteNode.NodeType, - LeafNodeLocation: afterSuiteNode.CodeLocation, - ParallelProcess: suite.config.ParallelProcess, + LeafNodeType: afterSuiteNode.NodeType, + LeafNodeLocation: afterSuiteNode.CodeLocation, + ParallelProcess: suite.config.ParallelProcess, + RunningInParallel: suite.isRunningInParallel(), } suite.selectiveLock.Unlock() @@ -461,9 +529,10 @@ func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) { for _, cleanupNode := range afterSuiteCleanup { suite.selectiveLock.Lock() suite.currentSpecReport = types.SpecReport{ - LeafNodeType: cleanupNode.NodeType, - LeafNodeLocation: cleanupNode.CodeLocation, - ParallelProcess: suite.config.ParallelProcess, + LeafNodeType: cleanupNode.NodeType, + LeafNodeLocation: cleanupNode.CodeLocation, + ParallelProcess: suite.config.ParallelProcess, + RunningInParallel: suite.isRunningInParallel(), } suite.selectiveLock.Unlock() @@ -474,23 +543,6 @@ func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) { } } -func (suite *Suite) runReportAfterSuite() { - for _, node := range suite.suiteNodes.WithType(types.NodeTypeReportAfterSuite) { - suite.selectiveLock.Lock() - suite.currentSpecReport = types.SpecReport{ - LeafNodeType: node.NodeType, - LeafNodeLocation: node.CodeLocation, - LeafNodeText: node.Text, - ParallelProcess: suite.config.ParallelProcess, - } - suite.selectiveLock.Unlock() - - suite.reporter.WillRun(suite.currentSpecReport) - suite.runReportAfterSuiteNode(node, suite.report) - suite.processCurrentSpecReport() - } -} - func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) { nodes := spec.Nodes.WithType(nodeType) if nodeType == types.NodeTypeReportAfterEach { @@ -608,39 +660,80 @@ func (suite *Suite) runSuiteNode(node Node) { if err != nil && !suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error()) + suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure) } suite.currentSpecReport.EndTime = time.Now() suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime) suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes()) suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput() +} + +func (suite *Suite) runReportSuiteNodesIfNeedBe(nodeType types.NodeType) { + nodes := suite.suiteNodes.WithType(nodeType) + // only run ReportAfterSuite on proc 1 + if nodeType.Is(types.NodeTypeReportAfterSuite) && suite.config.ParallelProcess != 1 { + 
return
+	}
+	// if we're running ReportBeforeSuite on proc > 1 - we should wait until proc 1 has completed
+	if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.config.ParallelProcess != 1 && len(nodes) > 0 {
+		state, err := suite.client.BlockUntilReportBeforeSuiteCompleted()
+		if err != nil || state.Is(types.SpecStateFailed) {
+			suite.report.SuiteSucceeded = false
+		}
+		return
+	}
+
+	for _, node := range nodes {
+		suite.selectiveLock.Lock()
+		suite.currentSpecReport = types.SpecReport{
+			LeafNodeType:      node.NodeType,
+			LeafNodeLocation:  node.CodeLocation,
+			LeafNodeText:      node.Text,
+			ParallelProcess:   suite.config.ParallelProcess,
+			RunningInParallel: suite.isRunningInParallel(),
+		}
+		suite.selectiveLock.Unlock()
+
+		suite.reporter.WillRun(suite.currentSpecReport)
+		suite.runReportSuiteNode(node, suite.report)
+		suite.processCurrentSpecReport()
+	}

-	return
+	// if we're running ReportBeforeSuite and we're running in parallel - we should tell the other procs that we're done
+	if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.isRunningInParallel() && len(nodes) > 0 {
+		if suite.report.SuiteSucceeded {
+			suite.client.PostReportBeforeSuiteCompleted(types.SpecStatePassed)
+		} else {
+			suite.client.PostReportBeforeSuiteCompleted(types.SpecStateFailed)
+		}
+	}
 }

-func (suite *Suite) runReportAfterSuiteNode(node Node, report types.Report) {
+func (suite *Suite) runReportSuiteNode(node Node, report types.Report) {
 	suite.writer.Truncate()
 	suite.outputInterceptor.StartInterceptingOutput()
 	suite.currentSpecReport.StartTime = time.Now()

-	if suite.config.ParallelTotal > 1 {
+	// if we're running a ReportAfterSuite in parallel (on proc 1) we (a) wait until other procs have exited and
+	// (b) always fetch the latest report as prior ReportAfterSuites will contribute to it
+	if node.NodeType.Is(types.NodeTypeReportAfterSuite) && suite.isRunningInParallel() {
 		aggregatedReport, err := suite.client.BlockUntilAggregatedNonprimaryProcsReport()
 		if err != nil {
 			suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
+			suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure)
 			return
 		}
 		report = report.Add(aggregatedReport)
 	}

-	node.Body = func(SpecContext) { node.ReportAfterSuiteBody(report) }
+	node.Body = func(SpecContext) { node.ReportSuiteBody(report) }
 	suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")

 	suite.currentSpecReport.EndTime = time.Now()
 	suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
 	suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
 	suite.currentSpecReport.CapturedStdOutErr = suite.outputInterceptor.StopInterceptingAndReturnOutput()
-
-	return
 }

 func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (types.SpecState, types.Failure) {
@@ -662,7 +755,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
 	suite.selectiveLock.Lock()
 	suite.currentNode = node
 	suite.currentNodeStartTime = time.Now()
-	suite.progressStepCursor = ProgressStepCursor{}
+	suite.currentByStep = types.SpecEvent{}
 	suite.selectiveLock.Unlock()
 	defer func() {
 		suite.selectiveLock.Lock()
@@ -671,13 +764,18 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
 		suite.selectiveLock.Unlock()
 	}()

-	if suite.config.EmitSpecProgress && !node.MarkedSuppressProgressReporting {
-		if text == "" { 
- text = "TOP-LEVEL" - } - s := fmt.Sprintf("[%s] %s\n %s\n", node.NodeType.String(), text, node.CodeLocation.String()) - suite.writer.Write([]byte(s)) + if text == "" { + text = "TOP-LEVEL" } + event := suite.handleSpecEvent(types.SpecEvent{ + SpecEventType: types.SpecEventNodeStart, + NodeType: node.NodeType, + Message: text, + CodeLocation: node.CodeLocation, + }) + defer func() { + suite.handleSpecEventEnd(types.SpecEventNodeEnd, event) + }() var failure types.Failure failure.FailureNodeType, failure.FailureNodeLocation = node.NodeType, node.CodeLocation @@ -697,18 +795,23 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ now := time.Now() deadline := suite.deadline + timeoutInPlay := "suite" if deadline.IsZero() || (!specDeadline.IsZero() && specDeadline.Before(deadline)) { deadline = specDeadline + timeoutInPlay = "spec" } if node.NodeTimeout > 0 && (deadline.IsZero() || deadline.Sub(now) > node.NodeTimeout) { deadline = now.Add(node.NodeTimeout) + timeoutInPlay = "node" } if (!deadline.IsZero() && deadline.Before(now)) || interruptStatus.Interrupted() { //we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't if node.NodeTimeout > 0 { deadline = now.Add(node.NodeTimeout) + timeoutInPlay = "node" } else { deadline = now.Add(gracePeriod) + timeoutInPlay = "grace period" } } @@ -743,6 +846,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ } outcomeFromRun, failureFromRun := suite.failer.Drain() + failureFromRun.TimelineLocation = suite.generateTimelineLocation() outcomeC <- outcomeFromRun failureC <- failureFromRun }() @@ -772,23 +876,33 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ select { case outcomeFromRun := <-outcomeC: failureFromRun := <-failureC - if outcome == types.SpecStateInterrupted { - // we've already been interrupted. we just managed to actually exit + if outcome.Is(types.SpecStateInterrupted | types.SpecStateTimedout) { + // we've already been interrupted/timed out. we just managed to actually exit // before the grace period elapsed - return outcome, failure - } else if outcome == types.SpecStateTimedout { - // we've already timed out. we just managed to actually exit - // before the grace period elapsed. if we have a failure message we should include it + // if we have a failure message we attach it as an additional failure if outcomeFromRun != types.SpecStatePassed { - failure.Location, failure.ForwardedPanic = failureFromRun.Location, failureFromRun.ForwardedPanic - failure.Message = "This spec timed out and reported the following failure after the timeout:\n\n" + failureFromRun.Message + additionalFailure := types.AdditionalFailure{ + State: outcomeFromRun, + Failure: failure, //we make a copy - this will include all the configuration set up above... 
+ } + //...and then we update the failure with the details from failureFromRun + additionalFailure.Failure.Location, additionalFailure.Failure.ForwardedPanic, additionalFailure.Failure.TimelineLocation = failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation + additionalFailure.Failure.ProgressReport = types.ProgressReport{} + if outcome == types.SpecStateTimedout { + additionalFailure.Failure.Message = fmt.Sprintf("A %s timeout occurred and then the following failure was recorded in the timedout node before it exited:\n%s", timeoutInPlay, failureFromRun.Message) + } else { + additionalFailure.Failure.Message = fmt.Sprintf("An interrupt occurred and then the following failure was recorded in the interrupted node before it exited:\n%s", failureFromRun.Message) + } + suite.reporter.EmitFailure(additionalFailure.State, additionalFailure.Failure) + failure.AdditionalFailure = &additionalFailure } return outcome, failure } if outcomeFromRun.Is(types.SpecStatePassed) { return outcomeFromRun, types.Failure{} } else { - failure.Message, failure.Location, failure.ForwardedPanic = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic + failure.Message, failure.Location, failure.ForwardedPanic, failure.TimelineLocation = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation + suite.reporter.EmitFailure(outcomeFromRun, failure) return outcomeFromRun, failure } case <-gracePeriodChannel: @@ -801,10 +915,12 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ case <-deadlineChannel: // we're out of time - the outcome is a timeout and we capture the failure and progress report outcome = types.SpecStateTimedout - failure.Message, failure.Location = "Timedout", node.CodeLocation + failure.Message, failure.Location, failure.TimelineLocation = fmt.Sprintf("A %s timeout occurred", timeoutInPlay), node.CodeLocation, suite.generateTimelineLocation() failure.ProgressReport = suite.generateProgressReport(false).WithoutCapturedGinkgoWriterOutput() - failure.ProgressReport.Message = "{{bold}}This is the Progress Report generated when the timeout occurred:{{/}}" + failure.ProgressReport.Message = fmt.Sprintf("{{bold}}This is the Progress Report generated when the %s timeout occurred:{{/}}", timeoutInPlay) deadlineChannel = nil + suite.reporter.EmitFailure(outcome, failure) + // tell the spec to stop. 
it's important we generate the progress report first to make sure we capture where // the spec is actually stuck sc.cancel() @@ -814,36 +930,36 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ interruptStatus = suite.interruptHandler.Status() deadlineChannel = nil // don't worry about deadlines, time's up now + failureTimelineLocation := suite.generateTimelineLocation() + progressReport := suite.generateProgressReport(true) + if outcome == types.SpecStateInvalid { outcome = types.SpecStateInterrupted - failure.Message, failure.Location = interruptStatus.Message(), node.CodeLocation + failure.Message, failure.Location, failure.TimelineLocation = interruptStatus.Message(), node.CodeLocation, failureTimelineLocation if interruptStatus.ShouldIncludeProgressReport() { - failure.ProgressReport = suite.generateProgressReport(true).WithoutCapturedGinkgoWriterOutput() + failure.ProgressReport = progressReport.WithoutCapturedGinkgoWriterOutput() failure.ProgressReport.Message = "{{bold}}This is the Progress Report generated when the interrupt was received:{{/}}" } + suite.reporter.EmitFailure(outcome, failure) } - var report types.ProgressReport - if interruptStatus.ShouldIncludeProgressReport() { - report = suite.generateProgressReport(false) - } - + progressReport = progressReport.WithoutOtherGoroutines() sc.cancel() if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut { if interruptStatus.ShouldIncludeProgressReport() { - report.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\n{{bold}}{{red}}Final interrupt received{{/}}; Ginkgo will not run any cleanup or reporting nodes and will terminate as soon as possible.\nHere's a current progress report:", interruptStatus.Message()) - suite.emitProgressReport(report) + progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\n{{bold}}{{red}}Final interrupt received{{/}}; Ginkgo will not run any cleanup or reporting nodes and will terminate as soon as possible.\nHere's a current progress report:", interruptStatus.Message()) + suite.emitProgressReport(progressReport) } return outcome, failure } if interruptStatus.ShouldIncludeProgressReport() { if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport { - report.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nFirst interrupt received; Ginkgo will run any cleanup and reporting nodes but will skip all remaining specs. {{bold}}Interrupt again to skip cleanup{{/}}.\nHere's a current progress report:", interruptStatus.Message()) + progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nFirst interrupt received; Ginkgo will run any cleanup and reporting nodes but will skip all remaining specs. {{bold}}Interrupt again to skip cleanup{{/}}.\nHere's a current progress report:", interruptStatus.Message()) } else if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly { - report.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nSecond interrupt received; Ginkgo will run any reporting nodes but will skip all remaining specs and cleanup nodes. {{bold}}Interrupt again to bail immediately{{/}}.\nHere's a current progress report:", interruptStatus.Message()) + progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nSecond interrupt received; Ginkgo will run any reporting nodes but will skip all remaining specs and cleanup nodes. 
{{bold}}Interrupt again to bail immediately{{/}}.\nHere's a current progress report:", interruptStatus.Message()) } - suite.emitProgressReport(report) + suite.emitProgressReport(progressReport) } if gracePeriodChannel == nil { @@ -864,10 +980,12 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ } } +// TODO: search for usages and consider if reporter.EmitFailure() is necessary func (suite *Suite) failureForLeafNodeWithMessage(node Node, message string) types.Failure { return types.Failure{ Message: message, Location: node.CodeLocation, + TimelineLocation: suite.generateTimelineLocation(), FailureNodeContext: types.FailureNodeIsLeafNode, FailureNodeType: node.NodeType, FailureNodeLocation: node.CodeLocation, diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go index 2f42b264..c797c95d 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go @@ -81,7 +81,7 @@ func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) { } func (t *ginkgoTestingTProxy) Helper() { - // No-op + types.MarkAsHelper(1) } func (t *ginkgoTestingTProxy) Log(args ...interface{}) { diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/writer.go b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go index da21d3b0..28a45b0f 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/writer.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go @@ -22,24 +22,30 @@ type WriterInterface interface { Truncate() Bytes() []byte + Len() int } -//Writer implements WriterInterface and GinkgoWriterInterface +// Writer implements WriterInterface and GinkgoWriterInterface type Writer struct { buffer *bytes.Buffer outWriter io.Writer lock *sync.Mutex mode WriterMode + streamIndent []byte + indentNext bool + teeWriters []io.Writer } func NewWriter(outWriter io.Writer) *Writer { return &Writer{ - buffer: &bytes.Buffer{}, - lock: &sync.Mutex{}, - outWriter: outWriter, - mode: WriterModeStreamAndBuffer, + buffer: &bytes.Buffer{}, + lock: &sync.Mutex{}, + outWriter: outWriter, + mode: WriterModeStreamAndBuffer, + streamIndent: []byte(" "), + indentNext: true, } } @@ -49,6 +55,14 @@ func (w *Writer) SetMode(mode WriterMode) { w.mode = mode } +func (w *Writer) Len() int { + w.lock.Lock() + defer w.lock.Unlock() + return w.buffer.Len() +} + +var newline = []byte("\n") + func (w *Writer) Write(b []byte) (n int, err error) { w.lock.Lock() defer w.lock.Unlock() @@ -58,7 +72,21 @@ func (w *Writer) Write(b []byte) (n int, err error) { } if w.mode == WriterModeStreamAndBuffer { - w.outWriter.Write(b) + line, remaining, found := []byte{}, b, false + for len(remaining) > 0 { + line, remaining, found = bytes.Cut(remaining, newline) + if len(line) > 0 { + if w.indentNext { + w.outWriter.Write(w.streamIndent) + w.indentNext = false + } + w.outWriter.Write(line) + } + if found { + w.outWriter.Write(newline) + w.indentNext = true + } + } } return w.buffer.Write(b) } @@ -78,7 +106,7 @@ func (w *Writer) Bytes() []byte { return copied } -//GinkgoWriterInterface +// GinkgoWriterInterface func (w *Writer) TeeTo(writer io.Writer) { w.lock.Lock() defer w.lock.Unlock() diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index d09488c2..56b7be75 100644 --- 
a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -12,6 +12,7 @@ import ( "io" "runtime" "strings" + "sync" "time" "github.com/onsi/ginkgo/v2/formatter" @@ -23,13 +24,16 @@ type DefaultReporter struct { writer io.Writer // managing the emission stream - lastChar string + lastCharWasNewline bool lastEmissionWasDelimiter bool // rendering specDenoter string retryDenoter string formatter formatter.Formatter + + runningInParallel bool + lock *sync.Mutex } func NewDefaultReporterUnderTest(conf types.ReporterConfig, writer io.Writer) *DefaultReporter { @@ -44,12 +48,13 @@ func NewDefaultReporter(conf types.ReporterConfig, writer io.Writer) *DefaultRep conf: conf, writer: writer, - lastChar: "\n", + lastCharWasNewline: true, lastEmissionWasDelimiter: false, specDenoter: "•", retryDenoter: "↺", formatter: formatter.NewWithNoColorBool(conf.NoColor), + lock: &sync.Mutex{}, } if runtime.GOOS == "windows" { reporter.specDenoter = "+" @@ -97,173 +102,219 @@ func (r *DefaultReporter) SuiteWillBegin(report types.Report) { } } -func (r *DefaultReporter) WillRun(report types.SpecReport) { - if r.conf.Verbosity().LT(types.VerbosityLevelVerbose) || report.State.Is(types.SpecStatePending|types.SpecStateSkipped) { +func (r *DefaultReporter) SuiteDidEnd(report types.Report) { + failures := report.SpecReports.WithState(types.SpecStateFailureStates) + if len(failures) > 0 { + r.emitBlock("\n") + if len(failures) > 1 { + r.emitBlock(r.f("{{red}}{{bold}}Summarizing %d Failures:{{/}}", len(failures))) + } else { + r.emitBlock(r.f("{{red}}{{bold}}Summarizing 1 Failure:{{/}}")) + } + for _, specReport := range failures { + highlightColor, heading := "{{red}}", "[FAIL]" + switch specReport.State { + case types.SpecStatePanicked: + highlightColor, heading = "{{magenta}}", "[PANICKED!]" + case types.SpecStateAborted: + highlightColor, heading = "{{coral}}", "[ABORTED]" + case types.SpecStateTimedout: + highlightColor, heading = "{{orange}}", "[TIMEDOUT]" + case types.SpecStateInterrupted: + highlightColor, heading = "{{orange}}", "[INTERRUPTED]" + } + locationBlock := r.codeLocationBlock(specReport, highlightColor, false, true) + r.emitBlock(r.fi(1, highlightColor+"%s{{/}} %s", heading, locationBlock)) + } + } + + //summarize the suite + if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) && report.SuiteSucceeded { + r.emit(r.f(" {{green}}SUCCESS!{{/}} %s ", report.RunTime)) return } - r.emitDelimiter() - indentation := uint(0) - if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { - r.emitBlock(r.f("{{bold}}[%s] %s{{/}}", report.LeafNodeType.String(), report.LeafNodeText)) + r.emitBlock("\n") + color, status := "{{green}}{{bold}}", "SUCCESS!" + if !report.SuiteSucceeded { + color, status = "{{red}}{{bold}}", "FAIL!" 
+ } + + specs := report.SpecReports.WithLeafNodeType(types.NodeTypeIt) //exclude any suite setup nodes + r.emitBlock(r.f(color+"Ran %d of %d Specs in %.3f seconds{{/}}", + specs.CountWithState(types.SpecStatePassed)+specs.CountWithState(types.SpecStateFailureStates), + report.PreRunStats.TotalSpecs, + report.RunTime.Seconds()), + ) + + switch len(report.SpecialSuiteFailureReasons) { + case 0: + r.emit(r.f(color+"%s{{/}} -- ", status)) + case 1: + r.emit(r.f(color+"%s - %s{{/}} -- ", status, report.SpecialSuiteFailureReasons[0])) + default: + r.emitBlock(r.f(color+"%s - %s{{/}}\n", status, strings.Join(report.SpecialSuiteFailureReasons, ", "))) + } + + if len(specs) == 0 && report.SpecReports.WithLeafNodeType(types.NodeTypeBeforeSuite|types.NodeTypeSynchronizedBeforeSuite).CountWithState(types.SpecStateFailureStates) > 0 { + r.emit(r.f("{{cyan}}{{bold}}A BeforeSuite node failed so all tests were skipped.{{/}}\n")) } else { - if len(report.ContainerHierarchyTexts) > 0 { - r.emitBlock(r.cycleJoin(report.ContainerHierarchyTexts, " ")) - indentation = 1 + r.emit(r.f("{{green}}{{bold}}%d Passed{{/}} | ", specs.CountWithState(types.SpecStatePassed))) + r.emit(r.f("{{red}}{{bold}}%d Failed{{/}} | ", specs.CountWithState(types.SpecStateFailureStates))) + if specs.CountOfFlakedSpecs() > 0 { + r.emit(r.f("{{light-yellow}}{{bold}}%d Flaked{{/}} | ", specs.CountOfFlakedSpecs())) } - line := r.fi(indentation, "{{bold}}%s{{/}}", report.LeafNodeText) - labels := report.Labels() - if len(labels) > 0 { - line += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels, ", ")) + if specs.CountOfRepeatedSpecs() > 0 { + r.emit(r.f("{{light-yellow}}{{bold}}%d Repeated{{/}} | ", specs.CountOfRepeatedSpecs())) } - r.emitBlock(line) + r.emit(r.f("{{yellow}}{{bold}}%d Pending{{/}} | ", specs.CountWithState(types.SpecStatePending))) + r.emit(r.f("{{cyan}}{{bold}}%d Skipped{{/}}\n", specs.CountWithState(types.SpecStateSkipped))) } - r.emitBlock(r.fi(indentation, "{{gray}}%s{{/}}", report.LeafNodeLocation)) } -func (r *DefaultReporter) DidRun(report types.SpecReport) { +func (r *DefaultReporter) WillRun(report types.SpecReport) { v := r.conf.Verbosity() - var header, highlightColor string - includeRuntime, emitGinkgoWriterOutput, stream, denoter := true, true, false, r.specDenoter - succinctLocationBlock := v.Is(types.VerbosityLevelSuccinct) + if v.LT(types.VerbosityLevelVerbose) || report.State.Is(types.SpecStatePending|types.SpecStateSkipped) || report.RunningInParallel { + return + } + + r.emitDelimiter(0) + r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false))) +} - hasGW := report.CapturedGinkgoWriterOutput != "" - hasStd := report.CapturedStdOutErr != "" - hasEmittableReports := report.ReportEntries.HasVisibility(types.ReportEntryVisibilityAlways) || (report.ReportEntries.HasVisibility(types.ReportEntryVisibilityFailureOrVerbose) && (!report.Failure.IsZero() || v.GTE(types.VerbosityLevelVerbose))) +func (r *DefaultReporter) DidRun(report types.SpecReport) { + v := r.conf.Verbosity() + inParallel := report.RunningInParallel + header := r.specDenoter if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { - denoter = fmt.Sprintf("[%s]", report.LeafNodeType) + header = fmt.Sprintf("[%s]", report.LeafNodeType) + } + highlightColor := r.highlightColorForState(report.State) + + // have we already been streaming the timeline? + timelineHasBeenStreaming := v.GTE(types.VerbosityLevelVerbose) && !inParallel + + // should we show the timeline? 
+ var timeline types.Timeline + showTimeline := !timelineHasBeenStreaming && (v.GTE(types.VerbosityLevelVerbose) || report.Failed()) + if showTimeline { + timeline = report.Timeline().WithoutHiddenReportEntries() + keepVeryVerboseSpecEvents := v.Is(types.VerbosityLevelVeryVerbose) || + (v.Is(types.VerbosityLevelVerbose) && r.conf.ShowNodeEvents) || + (report.Failed() && r.conf.ShowNodeEvents) + if !keepVeryVerboseSpecEvents { + timeline = timeline.WithoutVeryVerboseSpecEvents() + } + if len(timeline) == 0 && report.CapturedGinkgoWriterOutput == "" { + // the timeline is completely empty - don't show it + showTimeline = false + } + if v.LT(types.VerbosityLevelVeryVerbose) && report.CapturedGinkgoWriterOutput == "" && len(timeline) > 0 { + //if we aren't -vv and the timeline only has a single failure, don't show it as it will appear at the end of the report + failure, isFailure := timeline[0].(types.Failure) + if isFailure && (len(timeline) == 1 || (len(timeline) == 2 && failure.AdditionalFailure != nil)) { + showTimeline = false + } + } } - highlightColor = r.highlightColorForState(report.State) + // should we have a separate section for always-visible reports? + showSeparateVisibilityAlwaysReportsSection := !timelineHasBeenStreaming && !showTimeline && report.ReportEntries.HasVisibility(types.ReportEntryVisibilityAlways) + + // should we have a separate section for captured stdout/stderr + showSeparateStdSection := inParallel && (report.CapturedStdOutErr != "") + + // given all that - do we have any actual content to show? or are we a single denoter in a stream? + reportHasContent := v.Is(types.VerbosityLevelVeryVerbose) || showTimeline || showSeparateVisibilityAlwaysReportsSection || showSeparateStdSection || report.Failed() || (v.Is(types.VerbosityLevelVerbose) && !report.State.Is(types.SpecStateSkipped)) + + // should we show a runtime? + includeRuntime := !report.State.Is(types.SpecStateSkipped|types.SpecStatePending) || (report.State.Is(types.SpecStateSkipped) && report.Failure.Message != "") + + // should we show the codelocation block? 
+	showCodeLocation := !timelineHasBeenStreaming || !report.State.Is(types.SpecStatePassed)

 	switch report.State {
 	case types.SpecStatePassed:
-		succinctLocationBlock = v.LT(types.VerbosityLevelVerbose)
-		emitGinkgoWriterOutput = (r.conf.AlwaysEmitGinkgoWriter || v.GTE(types.VerbosityLevelVerbose)) && hasGW
+		if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) && !reportHasContent {
+			return
+		}
 		if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
-			if v.GTE(types.VerbosityLevelVerbose) || hasStd || hasEmittableReports {
-				header = fmt.Sprintf("%s PASSED", denoter)
-			} else {
-				return
-			}
-		} else {
-			header, stream = denoter, true
-			if report.NumAttempts > 1 && report.MaxFlakeAttempts > 1 {
-				header, stream = fmt.Sprintf("%s [FLAKEY TEST - TOOK %d ATTEMPTS TO PASS]", r.retryDenoter, report.NumAttempts), false
-			}
-			if report.RunTime > r.conf.SlowSpecThreshold {
-				header, stream = fmt.Sprintf("%s [SLOW TEST]", header), false
-			}
+			header = fmt.Sprintf("%s PASSED", header)
 		}
-		if hasStd || emitGinkgoWriterOutput || hasEmittableReports {
-			stream = false
+		if report.NumAttempts > 1 && report.MaxFlakeAttempts > 1 {
+			header, reportHasContent = fmt.Sprintf("%s [FLAKEY TEST - TOOK %d ATTEMPTS TO PASS]", r.retryDenoter, report.NumAttempts), true
 		}
 	case types.SpecStatePending:
-		includeRuntime, emitGinkgoWriterOutput = false, false
-		if v.Is(types.VerbosityLevelSuccinct) {
-			header, stream = "P", true
-		} else {
-			header, succinctLocationBlock = "P [PENDING]", v.LT(types.VerbosityLevelVeryVerbose)
+		header = "P"
+		if v.GT(types.VerbosityLevelSuccinct) {
+			header, reportHasContent = "P [PENDING]", true
 		}
 	case types.SpecStateSkipped:
-		if report.Failure.Message != "" || v.Is(types.VerbosityLevelVeryVerbose) {
-			header = "S [SKIPPED]"
-		} else {
-			header, stream = "S", true
+		header = "S"
+		if v.Is(types.VerbosityLevelVeryVerbose) || (v.Is(types.VerbosityLevelVerbose) && report.Failure.Message != "") {
+			header, reportHasContent = "S [SKIPPED]", true
+		}
+	default:
+		header = fmt.Sprintf("%s [%s]", header, r.humanReadableState(report.State))
+		if report.MaxMustPassRepeatedly > 1 {
+			header = fmt.Sprintf("%s DURING REPETITION #%d", header, report.NumAttempts)
 		}
-	case types.SpecStateFailed:
-		header = fmt.Sprintf("%s [FAILED]", denoter)
-	case types.SpecStateTimedout:
-		header = fmt.Sprintf("%s [TIMEDOUT]", denoter)
-	case types.SpecStatePanicked:
-		header = fmt.Sprintf("%s! [PANICKED]", denoter)
-	case types.SpecStateInterrupted:
-		header = fmt.Sprintf("%s! [INTERRUPTED]", denoter)
-	case types.SpecStateAborted:
-		header = fmt.Sprintf("%s! [ABORTED]", denoter)
 	}
-	if report.State.Is(types.SpecStateFailureStates) && report.MaxMustPassRepeatedly > 1 {
-		header, stream = fmt.Sprintf("%s DURING REPETITION #%d", header, report.NumAttempts), false
-	}
-	// Emit stream and return
-	if stream {
+	// If we have no content to show, just emit the header and return
+	if !reportHasContent {
 		r.emit(r.f(highlightColor + header + "{{/}}"))
 		return
 	}
-	// Emit header
-	r.emitDelimiter()
 	if includeRuntime {
 		header = r.f("%s [%.3f seconds]", header, report.RunTime.Seconds())
 	}
-	r.emitBlock(r.f(highlightColor + header + "{{/}}"))
-	// Emit Code Location Block
-	r.emitBlock(r.codeLocationBlock(report, highlightColor, succinctLocationBlock, false))
+	// Emit header
+	if !timelineHasBeenStreaming {
+		r.emitDelimiter(0)
+	}
+	r.emitBlock(r.f(highlightColor + header + "{{/}}"))
+	if showCodeLocation {
+		r.emitBlock(r.codeLocationBlock(report, highlightColor, v.Is(types.VerbosityLevelVeryVerbose), false))
+	}

 	//Emit Stdout/Stderr Output
-	if hasStd {
+	if showSeparateStdSection {
 		r.emitBlock("\n")
-		r.emitBlock(r.fi(1, "{{gray}}Begin Captured StdOut/StdErr Output >>{{/}}"))
-		r.emitBlock(r.fi(2, "%s", report.CapturedStdOutErr))
-		r.emitBlock(r.fi(1, "{{gray}}<< End Captured StdOut/StdErr Output{{/}}"))
+		r.emitBlock(r.fi(1, "{{gray}}Captured StdOut/StdErr Output >>{{/}}"))
+		r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr))
+		r.emitBlock(r.fi(1, "{{gray}}<< Captured StdOut/StdErr Output{{/}}"))
 	}

-	//Emit Captured GinkgoWriter Output
-	if emitGinkgoWriterOutput && hasGW {
+	if showSeparateVisibilityAlwaysReportsSection {
 		r.emitBlock("\n")
-		r.emitGinkgoWriterOutput(1, report.CapturedGinkgoWriterOutput, 0)
+		r.emitBlock(r.fi(1, "{{gray}}Report Entries >>{{/}}"))
+		for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) {
+			r.emitReportEntry(1, entry)
+		}
+		r.emitBlock(r.fi(1, "{{gray}}<< Report Entries{{/}}"))
 	}

-	if hasEmittableReports {
+	if showTimeline {
 		r.emitBlock("\n")
-		r.emitBlock(r.fi(1, "{{gray}}Begin Report Entries >>{{/}}"))
-		reportEntries := report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways)
-		if !report.Failure.IsZero() || v.GTE(types.VerbosityLevelVerbose) {
-			reportEntries = report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways, types.ReportEntryVisibilityFailureOrVerbose)
-		}
-		for _, entry := range reportEntries {
-			r.emitBlock(r.fi(2, "{{bold}}"+entry.Name+"{{gray}} - %s @ %s{{/}}", entry.Location, entry.Time.Format(types.GINKGO_TIME_FORMAT)))
-			if representation := entry.StringRepresentation(); representation != "" {
-				r.emitBlock(r.fi(3, representation))
-			}
-		}
-		r.emitBlock(r.fi(1, "{{gray}}<< End Report Entries{{/}}"))
+		r.emitBlock(r.fi(1, "{{gray}}Timeline >>{{/}}"))
+		r.emitTimeline(1, report, timeline)
+		r.emitBlock(r.fi(1, "{{gray}}<< Timeline{{/}}"))
 	}

 	// Emit Failure Message
-	if !report.Failure.IsZero() {
+	if !report.Failure.IsZero() && !v.Is(types.VerbosityLevelVeryVerbose) {
 		r.emitBlock("\n")
-		r.EmitFailure(1, report.State, report.Failure, false)
-	}
-
-	if len(report.AdditionalFailures) > 0 {
-		if v.GTE(types.VerbosityLevelVerbose) {
-			r.emitBlock("\n")
-			r.emitBlock(r.fi(1, "{{bold}}There were additional failures detected after the initial failure:{{/}}"))
-			for i, additionalFailure := range report.AdditionalFailures {
-				r.EmitFailure(2, additionalFailure.State, additionalFailure.Failure, true)
-				if i < len(report.AdditionalFailures)-1 {
-					r.emitBlock(r.fi(2, "{{gray}}%s{{/}}", strings.Repeat("-", 10)))
-				}
-			}
-		} else {
-			r.emitBlock("\n")
-			r.emitBlock(r.fi(1, "{{bold}}There were additional failures detected after the initial failure. Here's a summary - for full details run Ginkgo in verbose mode:{{/}}"))
-			for _, additionalFailure := range report.AdditionalFailures {
-				r.emitBlock(r.fi(2, r.highlightColorForState(additionalFailure.State)+"[%s]{{/}} in [%s] at %s",
-					r.humanReadableState(additionalFailure.State),
-					additionalFailure.Failure.FailureNodeType,
-					additionalFailure.Failure.Location,
-				))
-			}
-
+		r.emitFailure(1, report.State, report.Failure, true)
+		if len(report.AdditionalFailures) > 0 {
+			r.emitBlock(r.fi(1, "\nThere were {{bold}}{{red}}additional failures{{/}} detected. To view them in detail run {{bold}}ginkgo -vv{{/}}"))
 		}
 	}
-	r.emitDelimiter()
+	r.emitDelimiter(0)
 }

 func (r *DefaultReporter) highlightColorForState(state types.SpecState) string {
@@ -293,13 +344,68 @@ func (r *DefaultReporter) humanReadableState(state types.SpecState) string {
 	return strings.ToUpper(state.String())
 }

-func (r *DefaultReporter) EmitFailure(indent uint, state types.SpecState, failure types.Failure, includeState bool) {
-	highlightColor := r.highlightColorForState(state)
-	if includeState {
-		r.emitBlock(r.fi(indent, highlightColor+"[%s]{{/}}", r.humanReadableState(state)))
+func (r *DefaultReporter) emitTimeline(indent uint, report types.SpecReport, timeline types.Timeline) {
+	isVeryVerbose := r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose)
+	gw := report.CapturedGinkgoWriterOutput
+	cursor := 0
+	for _, entry := range timeline {
+		tl := entry.GetTimelineLocation()
+		if tl.Offset < len(gw) {
+			r.emit(r.fi(indent, "%s", gw[cursor:tl.Offset]))
+			cursor = tl.Offset
+		} else if cursor < len(gw) {
+			r.emit(r.fi(indent, "%s", gw[cursor:]))
+			cursor = len(gw)
+		}
+		switch x := entry.(type) {
+		case types.Failure:
+			if isVeryVerbose {
+				r.emitFailure(indent, report.State, x, false)
+			} else {
+				r.emitShortFailure(indent, report.State, x)
+			}
+		case types.AdditionalFailure:
+			if isVeryVerbose {
+				r.emitFailure(indent, x.State, x.Failure, true)
+			} else {
+				r.emitShortFailure(indent, x.State, x.Failure)
+			}
+		case types.ReportEntry:
+			r.emitReportEntry(indent, x)
+		case types.ProgressReport:
+			r.emitProgressReport(indent, false, x)
+		case types.SpecEvent:
+			if isVeryVerbose || !x.IsOnlyVisibleAtVeryVerbose() || r.conf.ShowNodeEvents {
+				r.emitSpecEvent(indent, x, isVeryVerbose)
+			}
+		}
+	}
+	if cursor < len(gw) {
+		r.emit(r.fi(indent, "%s", gw[cursor:]))
+	}
+}
+
+func (r *DefaultReporter) EmitFailure(state types.SpecState, failure types.Failure) {
+	if r.conf.Verbosity().Is(types.VerbosityLevelVerbose) {
+		r.emitShortFailure(1, state, failure)
+	} else if r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose) {
+		r.emitFailure(1, state, failure, true)
 	}
-	r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.Message))
-	r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}}\n", failure.FailureNodeType, failure.Location))
+}
+
+func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, failure types.Failure) {
+	r.emitBlock(r.fi(indent, r.highlightColorForState(state)+"[%s]{{/}} in [%s] - %s {{gray}}@ %s{{/}}",
+		r.humanReadableState(state),
+		failure.FailureNodeType,
+		failure.Location,
+		failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT),
+	))
+}
+
+func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) {
+	highlightColor := r.highlightColorForState(state)
+ r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message)) + r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) if failure.ForwardedPanic != "" { r.emitBlock("\n") r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic)) @@ -315,86 +421,22 @@ func (r *DefaultReporter) EmitFailure(indent uint, state types.SpecState, failur r.emitBlock("\n") r.emitProgressReport(indent, false, failure.ProgressReport) } -} -func (r *DefaultReporter) SuiteDidEnd(report types.Report) { - failures := report.SpecReports.WithState(types.SpecStateFailureStates) - if len(failures) > 0 { - r.emitBlock("\n\n") - if len(failures) > 1 { - r.emitBlock(r.f("{{red}}{{bold}}Summarizing %d Failures:{{/}}", len(failures))) - } else { - r.emitBlock(r.f("{{red}}{{bold}}Summarizing 1 Failure:{{/}}")) - } - for _, specReport := range failures { - highlightColor, heading := "{{red}}", "[FAIL]" - switch specReport.State { - case types.SpecStatePanicked: - highlightColor, heading = "{{magenta}}", "[PANICKED!]" - case types.SpecStateAborted: - highlightColor, heading = "{{coral}}", "[ABORTED]" - case types.SpecStateTimedout: - highlightColor, heading = "{{orange}}", "[TIMEDOUT]" - case types.SpecStateInterrupted: - highlightColor, heading = "{{orange}}", "[INTERRUPTED]" - } - locationBlock := r.codeLocationBlock(specReport, highlightColor, true, true) - r.emitBlock(r.fi(1, highlightColor+"%s{{/}} %s", heading, locationBlock)) - } - } - - //summarize the suite - if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) && report.SuiteSucceeded { - r.emit(r.f(" {{green}}SUCCESS!{{/}} %s ", report.RunTime)) - return - } - - r.emitBlock("\n") - color, status := "{{green}}{{bold}}", "SUCCESS!" - if !report.SuiteSucceeded { - color, status = "{{red}}{{bold}}", "FAIL!" 
-	}
-
-	specs := report.SpecReports.WithLeafNodeType(types.NodeTypeIt) //exclude any suite setup nodes
-	r.emitBlock(r.f(color+"Ran %d of %d Specs in %.3f seconds{{/}}",
-		specs.CountWithState(types.SpecStatePassed)+specs.CountWithState(types.SpecStateFailureStates),
-		report.PreRunStats.TotalSpecs,
-		report.RunTime.Seconds()),
-	)
-
-	switch len(report.SpecialSuiteFailureReasons) {
-	case 0:
-		r.emit(r.f(color+"%s{{/}} -- ", status))
-	case 1:
-		r.emit(r.f(color+"%s - %s{{/}} -- ", status, report.SpecialSuiteFailureReasons[0]))
-	default:
-		r.emitBlock(r.f(color+"%s - %s{{/}}\n", status, strings.Join(report.SpecialSuiteFailureReasons, ", ")))
-	}
-
-	if len(specs) == 0 && report.SpecReports.WithLeafNodeType(types.NodeTypeBeforeSuite|types.NodeTypeSynchronizedBeforeSuite).CountWithState(types.SpecStateFailureStates) > 0 {
-		r.emit(r.f("{{cyan}}{{bold}}A BeforeSuite node failed so all tests were skipped.{{/}}\n"))
-	} else {
-		r.emit(r.f("{{green}}{{bold}}%d Passed{{/}} | ", specs.CountWithState(types.SpecStatePassed)))
-		r.emit(r.f("{{red}}{{bold}}%d Failed{{/}} | ", specs.CountWithState(types.SpecStateFailureStates)))
-		if specs.CountOfFlakedSpecs() > 0 {
-			r.emit(r.f("{{light-yellow}}{{bold}}%d Flaked{{/}} | ", specs.CountOfFlakedSpecs()))
-		}
-		if specs.CountOfRepeatedSpecs() > 0 {
-			r.emit(r.f("{{light-yellow}}{{bold}}%d Repeated{{/}} | ", specs.CountOfRepeatedSpecs()))
-		}
-		r.emit(r.f("{{yellow}}{{bold}}%d Pending{{/}} | ", specs.CountWithState(types.SpecStatePending)))
-		r.emit(r.f("{{cyan}}{{bold}}%d Skipped{{/}}\n", specs.CountWithState(types.SpecStateSkipped)))
+	if failure.AdditionalFailure != nil && includeAdditionalFailure {
+		r.emitBlock("\n")
+		r.emitFailure(indent, failure.AdditionalFailure.State, failure.AdditionalFailure.Failure, true)
 	}
 }

 func (r *DefaultReporter) EmitProgressReport(report types.ProgressReport) {
-	r.emitDelimiter()
+	r.emitDelimiter(1)

 	if report.RunningInParallel {
-		r.emit(r.f("{{coral}}Progress Report for Ginkgo Process #{{bold}}%d{{/}}\n", report.ParallelProcess))
+		r.emit(r.fi(1, "{{coral}}Progress Report for Ginkgo Process #{{bold}}%d{{/}}\n", report.ParallelProcess))
 	}
-	r.emitProgressReport(0, true, report)
-	r.emitDelimiter()
+	shouldEmitGW := report.RunningInParallel || r.conf.Verbosity().LT(types.VerbosityLevelVerbose)
+	r.emitProgressReport(1, shouldEmitGW, report)
+	r.emitDelimiter(1)
 }

 func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput bool, report types.ProgressReport) {
@@ -409,7 +451,7 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput
 			r.emit(" ")
 			subjectIndent = 0
 		}
-		r.emit(r.fi(subjectIndent, "{{bold}}{{orange}}%s{{/}} (Spec Runtime: %s)\n", report.LeafNodeText, report.Time.Sub(report.SpecStartTime).Round(time.Millisecond)))
+		r.emit(r.fi(subjectIndent, "{{bold}}{{orange}}%s{{/}} (Spec Runtime: %s)\n", report.LeafNodeText, report.Time().Sub(report.SpecStartTime).Round(time.Millisecond)))
 		r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.LeafNodeLocation))
 		indent += 1
 	}
@@ -419,12 +461,12 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput
 			r.emit(r.f(" {{bold}}{{orange}}%s{{/}}", report.CurrentNodeText))
 		}
-		r.emit(r.f(" (Node Runtime: %s)\n", report.Time.Sub(report.CurrentNodeStartTime).Round(time.Millisecond)))
+		r.emit(r.f(" (Node Runtime: %s)\n", report.Time().Sub(report.CurrentNodeStartTime).Round(time.Millisecond)))
 		r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentNodeLocation))
 		indent += 1
 	}
 	if report.CurrentStepText != "" {
-		r.emit(r.fi(indent, "At {{bold}}{{orange}}[By Step] %s{{/}} (Step Runtime: %s)\n", report.CurrentStepText, report.Time.Sub(report.CurrentStepStartTime).Round(time.Millisecond)))
+		r.emit(r.fi(indent, "At {{bold}}{{orange}}[By Step] %s{{/}} (Step Runtime: %s)\n", report.CurrentStepText, report.Time().Sub(report.CurrentStepStartTime).Round(time.Millisecond)))
 		r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentStepLocation))
 		indent += 1
 	}
@@ -433,9 +475,19 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput
 		indent -= 1
 	}

-	if emitGinkgoWriterOutput && report.CapturedGinkgoWriterOutput != "" && (report.RunningInParallel || r.conf.Verbosity().LT(types.VerbosityLevelVerbose)) {
+	if emitGinkgoWriterOutput && report.CapturedGinkgoWriterOutput != "" {
 		r.emit("\n")
-		r.emitGinkgoWriterOutput(indent, report.CapturedGinkgoWriterOutput, 10)
+		r.emitBlock(r.fi(indent, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}"))
+		limit, lines := 10, strings.Split(report.CapturedGinkgoWriterOutput, "\n")
+		if len(lines) <= limit {
+			r.emitBlock(r.fi(indent+1, "%s", report.CapturedGinkgoWriterOutput))
+		} else {
+			r.emitBlock(r.fi(indent+1, "{{gray}}...{{/}}"))
+			for _, line := range lines[len(lines)-limit-1:] {
+				r.emitBlock(r.fi(indent+1, "%s", line))
+			}
+		}
+		r.emitBlock(r.fi(indent, "{{gray}}<< End Captured GinkgoWriter Output{{/}}"))
 	}

 	if !report.SpecGoroutine().IsZero() {
@@ -471,22 +523,48 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput
 	}
 }

-func (r *DefaultReporter) emitGinkgoWriterOutput(indent uint, output string, limit int) {
-	r.emitBlock(r.fi(indent, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}"))
-	if limit == 0 {
-		r.emitBlock(r.fi(indent+1, "%s", output))
-	} else {
-		lines := strings.Split(output, "\n")
-		if len(lines) <= limit {
-			r.emitBlock(r.fi(indent+1, "%s", output))
-		} else {
-			r.emitBlock(r.fi(indent+1, "{{gray}}...{{/}}"))
-			for _, line := range lines[len(lines)-limit-1:] {
-				r.emitBlock(r.fi(indent+1, "%s", line))
-			}
-		}
+func (r *DefaultReporter) EmitReportEntry(entry types.ReportEntry) {
+	if r.conf.Verbosity().LT(types.VerbosityLevelVerbose) || entry.Visibility == types.ReportEntryVisibilityNever {
+		return
+	}
+	r.emitReportEntry(1, entry)
+}
+
+func (r *DefaultReporter) emitReportEntry(indent uint, entry types.ReportEntry) {
+	r.emitBlock(r.fi(indent, "{{bold}}"+entry.Name+"{{gray}} "+fmt.Sprintf("- %s @ %s{{/}}", entry.Location, entry.Time.Format(types.GINKGO_TIME_FORMAT))))
+	if representation := entry.StringRepresentation(); representation != "" {
+		r.emitBlock(r.fi(indent+1, representation))
+	}
+}
+
+func (r *DefaultReporter) EmitSpecEvent(event types.SpecEvent) {
+	v := r.conf.Verbosity()
+	if v.Is(types.VerbosityLevelVeryVerbose) || (v.Is(types.VerbosityLevelVerbose) && (r.conf.ShowNodeEvents || !event.IsOnlyVisibleAtVeryVerbose())) {
+		r.emitSpecEvent(1, event, r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose))
+	}
+}
+
+func (r *DefaultReporter) emitSpecEvent(indent uint, event types.SpecEvent, includeLocation bool) {
+	location := ""
+	if includeLocation {
+		location = fmt.Sprintf("- %s ", event.CodeLocation.String())
+	}
+	switch event.SpecEventType {
+	case types.SpecEventInvalid:
+		return
+	case types.SpecEventByStart:
+		r.emitBlock(r.fi(indent, "{{bold}}STEP:{{/}} %s {{gray}}%s@ %s{{/}}", event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
+	case types.SpecEventByEnd:
+		r.emitBlock(r.fi(indent, "{{bold}}END STEP:{{/}} %s {{gray}}%s@ %s (%s){{/}}", event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), event.Duration.Round(time.Millisecond)))
+	case types.SpecEventNodeStart:
+		r.emitBlock(r.fi(indent, "> Enter {{bold}}[%s]{{/}} %s {{gray}}%s@ %s{{/}}", event.NodeType.String(), event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
+	case types.SpecEventNodeEnd:
+		r.emitBlock(r.fi(indent, "< Exit {{bold}}[%s]{{/}} %s {{gray}}%s@ %s (%s){{/}}", event.NodeType.String(), event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), event.Duration.Round(time.Millisecond)))
+	case types.SpecEventSpecRepeat:
+		r.emitBlock(r.fi(indent, "\n{{bold}}Attempt #%d {{green}}Passed{{/}}{{bold}}. Repeating %s{{/}} {{gray}}@ %s{{/}}\n\n", event.Attempt, r.retryDenoter, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
+	case types.SpecEventSpecRetry:
+		r.emitBlock(r.fi(indent, "\n{{bold}}Attempt #%d {{red}}Failed{{/}}{{bold}}. Retrying %s{{/}} {{gray}}@ %s{{/}}\n\n", event.Attempt, r.retryDenoter, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
 	}
-	r.emitBlock(r.fi(indent, "{{gray}}<< End Captured GinkgoWriter Output{{/}}"))
 }

 func (r *DefaultReporter) emitGoroutines(indent uint, goroutines ...types.Goroutine) {
@@ -544,31 +622,37 @@ func (r *DefaultReporter) emitSource(indent uint, fc types.FunctionCall) {

 /* Emitting to the writer */
 func (r *DefaultReporter) emit(s string) {
-	if len(s) > 0 {
-		r.lastChar = s[len(s)-1:]
-		r.lastEmissionWasDelimiter = false
-		r.writer.Write([]byte(s))
-	}
+	r._emit(s, false, false)
 }

 func (r *DefaultReporter) emitBlock(s string) {
-	if len(s) > 0 {
-		if r.lastChar != "\n" {
-			r.emit("\n")
-		}
-		r.emit(s)
-		if r.lastChar != "\n" {
-			r.emit("\n")
-		}
-	}
+	r._emit(s, true, false)
+}
+
+func (r *DefaultReporter) emitDelimiter(indent uint) {
+	r._emit(r.fi(indent, "{{gray}}%s{{/}}", strings.Repeat("-", 30)), true, true)
 }

-func (r *DefaultReporter) emitDelimiter() {
-	if r.lastEmissionWasDelimiter {
+// a bit ugly - but we're trying to minimize locking on this hot codepath
+func (r *DefaultReporter) _emit(s string, block bool, isDelimiter bool) {
+	if len(s) == 0 {
+		return
+	}
+	r.lock.Lock()
+	defer r.lock.Unlock()
+	if isDelimiter && r.lastEmissionWasDelimiter {
 		return
 	}
-	r.emitBlock(r.f("{{gray}}%s{{/}}", strings.Repeat("-", 30)))
-	r.lastEmissionWasDelimiter = true
+	if block && !r.lastCharWasNewline {
+		r.writer.Write([]byte("\n"))
+	}
+	r.lastCharWasNewline = (s[len(s)-1:] == "\n")
+	r.writer.Write([]byte(s))
+	if block && !r.lastCharWasNewline {
+		r.writer.Write([]byte("\n"))
+		r.lastCharWasNewline = true
+	}
+	r.lastEmissionWasDelimiter = isDelimiter
 }

 /* Rendering text */
@@ -584,13 +668,14 @@ func (r *DefaultReporter) cycleJoin(elements []string, joiner string) string {
 	return r.formatter.CycleJoin(elements, joiner, []string{"{{/}}", "{{gray}}"})
 }

-func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, succinct bool, usePreciseFailureLocation bool) string {
+func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, veryVerbose bool, usePreciseFailureLocation bool) string {
 	texts, locations, labels := []string{}, []types.CodeLocation{}, [][]string{}
 	texts, locations, labels = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...)
+
 	if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
 		texts = append(texts, r.f("[%s] %s", report.LeafNodeType, report.LeafNodeText))
 	} else {
-		texts = append(texts, report.LeafNodeText)
+		texts = append(texts, r.f(report.LeafNodeText))
 	}
 	labels = append(labels, report.LeafNodeLabels)
 	locations = append(locations, report.LeafNodeLocation)
@@ -600,24 +685,58 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo
 		failureLocation = report.Failure.Location
 	}

+	highlightIndex := -1
 	switch report.Failure.FailureNodeContext {
 	case types.FailureNodeAtTopLevel:
-		texts = append([]string{r.f(highlightColor+"{{bold}}TOP-LEVEL [%s]{{/}}", report.Failure.FailureNodeType)}, texts...)
+		texts = append([]string{fmt.Sprintf("TOP-LEVEL [%s]", report.Failure.FailureNodeType)}, texts...)
 		locations = append([]types.CodeLocation{failureLocation}, locations...)
 		labels = append([][]string{{}}, labels...)
+		highlightIndex = 0
 	case types.FailureNodeInContainer:
 		i := report.Failure.FailureNodeContainerIndex
-		texts[i] = r.f(highlightColor+"{{bold}}%s [%s]{{/}}", texts[i], report.Failure.FailureNodeType)
+		texts[i] = fmt.Sprintf("%s [%s]", texts[i], report.Failure.FailureNodeType)
 		locations[i] = failureLocation
+		highlightIndex = i
 	case types.FailureNodeIsLeafNode:
 		i := len(texts) - 1
-		texts[i] = r.f(highlightColor+"{{bold}}[%s] %s{{/}}", report.LeafNodeType, report.LeafNodeText)
+		texts[i] = fmt.Sprintf("[%s] %s", report.LeafNodeType, report.LeafNodeText)
 		locations[i] = failureLocation
+		highlightIndex = i
+	default:
+		//there is no failure, so we highlight the leaf node
+		highlightIndex = len(texts) - 1
 	}

 	out := ""
-	if succinct {
-		out += r.f("%s", r.cycleJoin(texts, " "))
+	if veryVerbose {
+		for i := range texts {
+			if i == highlightIndex {
+				out += r.fi(uint(i), highlightColor+"{{bold}}%s{{/}}", texts[i])
+			} else {
+				out += r.fi(uint(i), "%s", texts[i])
+			}
+			if len(labels[i]) > 0 {
+				out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", "))
+			}
+			out += "\n"
+			out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i])
+		}
+	} else {
+		for i := range texts {
+			style := "{{/}}"
+			if i%2 == 1 {
+				style = "{{gray}}"
+			}
+			if i == highlightIndex {
+				style = highlightColor + "{{bold}}"
+			}
+			out += r.f(style+"%s", texts[i])
+			if i < len(texts)-1 {
+				out += " "
+			} else {
+				out += r.f("{{/}}")
+			}
+		}
 		flattenedLabels := report.Labels()
 		if len(flattenedLabels) > 0 {
 			out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedLabels, ", "))
@@ -626,17 +745,15 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo
 		if usePreciseFailureLocation {
 			out += r.f("{{gray}}%s{{/}}", failureLocation)
 		} else {
-			out += r.f("{{gray}}%s{{/}}", locations[len(locations)-1])
-		}
-	} else {
-		for i := range texts {
-			out += r.fi(uint(i), "%s", texts[i])
-			if len(labels[i]) > 0 {
-				out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", "))
+			leafLocation := locations[len(locations)-1]
+			if (report.Failure.FailureNodeLocation != types.CodeLocation{}) && (report.Failure.FailureNodeLocation != leafLocation) {
+				out += r.fi(1, highlightColor+"[%s]{{/}} {{gray}}%s{{/}}\n", report.Failure.FailureNodeType, report.Failure.FailureNodeLocation)
+				out += r.fi(1, "{{gray}}[%s] %s{{/}}", report.LeafNodeType, leafLocation)
+			} else {
+				out += r.f("{{gray}}%s{{/}}", leafLocation)
 			}
-			out += "\n"
-			out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i])
 		}
+	}
 	return out
 }
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go
index 89d30076..613072eb 100644
--- a/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go
@@ -35,7 +35,7 @@ func ReportViaDeprecatedReporter(reporter DeprecatedReporter, report types.Repor
 		FailOnPending: report.SuiteConfig.FailOnPending,
 		FailFast: report.SuiteConfig.FailFast,
 		FlakeAttempts: report.SuiteConfig.FlakeAttempts,
-		EmitSpecProgress: report.SuiteConfig.EmitSpecProgress,
+		EmitSpecProgress: false,
 		DryRun: report.SuiteConfig.DryRun,
 		ParallelNode: report.SuiteConfig.ParallelProcess,
 		ParallelTotal: report.SuiteConfig.ParallelTotal,
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
index fcea6ab1..ca98609d 100644
--- a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
@@ -15,12 +15,32 @@ import (
 	"fmt"
 	"os"
 	"strings"
-	"time"

 	"github.com/onsi/ginkgo/v2/config"
 	"github.com/onsi/ginkgo/v2/types"
 )

+type JunitReportConfig struct {
+	// Spec States for which no timeline should be emitted for system-err
+	// set this to types.SpecStatePassed|types.SpecStateSkipped|types.SpecStatePending to only match failing specs
+	OmitTimelinesForSpecState types.SpecState
+
+	// Enable OmitFailureMessageAttr to prevent failure messages appearing in the "message" attribute of the Failure and Error tags
+	OmitFailureMessageAttr bool
+
+	// Enable OmitCapturedStdOutErr to prevent captured stdout/stderr appearing in system-out
+	OmitCapturedStdOutErr bool
+
+	// Enable OmitSpecLabels to prevent labels from appearing in the spec name
+	OmitSpecLabels bool
+
+	// Enable OmitLeafNodeType to prevent the spec leaf node type from appearing in the spec name
+	OmitLeafNodeType bool
+
+	// Enable OmitSuiteSetupNodes to prevent the creation of testcase entries for setup nodes
+	OmitSuiteSetupNodes bool
+}
+
 type JUnitTestSuites struct {
 	XMLName xml.Name `xml:"testsuites"`
 	// Tests maps onto the total number of specs in all test suites (this includes any suite nodes such as BeforeSuite)
@@ -128,6 +148,10 @@ type JUnitFailure struct {
 }

 func GenerateJUnitReport(report types.Report, dst string) error {
+	return GenerateJUnitReportWithConfig(report, dst, JunitReportConfig{})
+}
+
+func GenerateJUnitReportWithConfig(report types.Report, dst string, config JunitReportConfig) error {
 	suite := JUnitTestSuite{
 		Name: report.SuiteDescription,
 		Package: report.SuitePath,
@@ -149,7 +173,6 @@ func GenerateJUnitReport(report types.Report, dst string) error {
 			{"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)},
 			{"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)},
 			{"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)},
-			{"EmitSpecProgress", fmt.Sprintf("%t", report.SuiteConfig.EmitSpecProgress)},
 			{"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)},
 			{"ParallelTotal", fmt.Sprintf("%d", report.SuiteConfig.ParallelTotal)},
 			{"OutputInterceptorMode", report.SuiteConfig.OutputInterceptorMode},
 		},
 	}
 	for _, spec := range report.SpecReports {
+		if config.OmitSuiteSetupNodes && spec.LeafNodeType != types.NodeTypeIt {
+			continue
+		}
 		name := fmt.Sprintf("[%s]", spec.LeafNodeType)
+		if config.OmitLeafNodeType {
+			name = ""
+		}
 		if spec.FullText() != "" {
 			name = name + " " + spec.FullText()
 		}
 		labels := spec.Labels()
-		if len(labels) > 0 {
+		if len(labels) > 0 && !config.OmitSpecLabels {
 			name = name + " [" + strings.Join(labels, ", ") + "]"
 		}
+		name = strings.TrimSpace(name)

 		test := JUnitTestCase{
 			Name: name,
 			Classname: report.SuiteDescription,
 			Status: spec.State.String(),
 			Time: spec.RunTime.Seconds(),
-			SystemOut: systemOutForUnstructuredReporters(spec),
-			SystemErr: systemErrForUnstructuredReporters(spec),
+		}
+		if !spec.State.Is(config.OmitTimelinesForSpecState) {
+			test.SystemErr = systemErrForUnstructuredReporters(spec)
+		}
+		if !config.OmitCapturedStdOutErr {
+			test.SystemOut = systemOutForUnstructuredReporters(spec)
 		}

 		suite.Tests += 1
@@ -193,6 +227,9 @@ func GenerateJUnitReport(report types.Report, dst string) error {
 				Type: "failed",
 				Description: failureDescriptionForUnstructuredReporters(spec),
 			}
+			if config.OmitFailureMessageAttr {
+				test.Failure.Message = ""
+			}
 			suite.Failures += 1
 		case types.SpecStateTimedout:
 			test.Failure = &JUnitFailure{
@@ -200,6 +237,9 @@ func GenerateJUnitReport(report types.Report, dst string) error {
 				Type: "timedout",
 				Description: failureDescriptionForUnstructuredReporters(spec),
 			}
+			if config.OmitFailureMessageAttr {
+				test.Failure.Message = ""
+			}
 			suite.Failures += 1
 		case types.SpecStateInterrupted:
 			test.Error = &JUnitError{
@@ -207,6 +247,9 @@ func GenerateJUnitReport(report types.Report, dst string) error {
 				Type: "interrupted",
 				Description: failureDescriptionForUnstructuredReporters(spec),
 			}
+			if config.OmitFailureMessageAttr {
+				test.Error.Message = ""
+			}
 			suite.Errors += 1
 		case types.SpecStateAborted:
 			test.Failure = &JUnitFailure{
@@ -214,6 +257,9 @@ func GenerateJUnitReport(report types.Report, dst string) error {
 				Type: "aborted",
 				Description: failureDescriptionForUnstructuredReporters(spec),
 			}
+			if config.OmitFailureMessageAttr {
+				test.Failure.Message = ""
+			}
 			suite.Errors += 1
 		case types.SpecStatePanicked:
 			test.Error = &JUnitError{
@@ -221,6 +267,9 @@ func GenerateJUnitReport(report types.Report, dst string) error {
 				Type: "panicked",
 				Description: failureDescriptionForUnstructuredReporters(spec),
 			}
+			if config.OmitFailureMessageAttr {
+				test.Error.Message = ""
+			}
 			suite.Errors += 1
 		}
@@ -287,63 +336,21 @@ func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error)

 func failureDescriptionForUnstructuredReporters(spec types.SpecReport) string {
 	out := &strings.Builder{}
-	out.WriteString(spec.Failure.Location.String() + "\n")
-	out.WriteString(spec.Failure.Location.FullStackTrace)
-	if !spec.Failure.ProgressReport.IsZero() {
-		out.WriteString("\n")
-		NewDefaultReporter(types.ReporterConfig{NoColor: true}, out).EmitProgressReport(spec.Failure.ProgressReport)
-	}
+	NewDefaultReporter(types.ReporterConfig{NoColor: true, VeryVerbose: true}, out).emitFailure(0, spec.State, spec.Failure, true)
 	if len(spec.AdditionalFailures) > 0 {
-		out.WriteString("\nThere were additional failures detected after the initial failure:\n")
-		for i, additionalFailure := range spec.AdditionalFailures {
-			NewDefaultReporter(types.ReporterConfig{NoColor: true}, out).EmitFailure(0, additionalFailure.State, additionalFailure.Failure, true)
-			if i < len(spec.AdditionalFailures)-1 {
-				out.WriteString("----------\n")
-			}
-		}
+		out.WriteString("\nThere were additional failures detected after the initial failure. These are visible in the timeline\n")
 	}
 	return out.String()
 }

 func systemErrForUnstructuredReporters(spec types.SpecReport) string {
 	out := &strings.Builder{}
-	gw := spec.CapturedGinkgoWriterOutput
-	cursor := 0
-	for _, pr := range spec.ProgressReports {
-		if cursor < pr.GinkgoWriterOffset {
-			if pr.GinkgoWriterOffset < len(gw) {
-				out.WriteString(gw[cursor:pr.GinkgoWriterOffset])
-				cursor = pr.GinkgoWriterOffset
-			} else if cursor < len(gw) {
-				out.WriteString(gw[cursor:])
-				cursor = len(gw)
-			}
-		}
-		NewDefaultReporter(types.ReporterConfig{NoColor: true}, out).EmitProgressReport(pr)
-	}
-
-	if cursor < len(gw) {
-		out.WriteString(gw[cursor:])
-	}
-
+	NewDefaultReporter(types.ReporterConfig{NoColor: true, VeryVerbose: true}, out).emitTimeline(0, spec, spec.Timeline())
 	return out.String()
 }

 func systemOutForUnstructuredReporters(spec types.SpecReport) string {
-	systemOut := spec.CapturedStdOutErr
-	if len(spec.ReportEntries) > 0 {
-		systemOut += "\nReport Entries:\n"
-		for i, entry := range spec.ReportEntries {
-			systemOut += fmt.Sprintf("%s\n%s\n%s\n", entry.Name, entry.Location, entry.Time.Format(time.RFC3339Nano))
-			if representation := entry.StringRepresentation(); representation != "" {
-				systemOut += representation + "\n"
-			}
-			if i+1 < len(spec.ReportEntries) {
-				systemOut += "--\n"
-			}
-		}
-	}
-	return systemOut
+	return spec.CapturedStdOutErr
 }

 // Deprecated JUnitReporter (so folks can still compile their suites)
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go
index f79f005d..5e726c46 100644
--- a/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go
@@ -9,13 +9,21 @@ type Reporter interface {
 	WillRun(report types.SpecReport)
 	DidRun(report types.SpecReport)
 	SuiteDidEnd(report types.Report)
+
+	//Timeline emission
+	EmitFailure(state types.SpecState, failure types.Failure)
 	EmitProgressReport(progressReport types.ProgressReport)
+	EmitReportEntry(entry types.ReportEntry)
+	EmitSpecEvent(event types.SpecEvent)
 }

 type NoopReporter struct{}

-func (n NoopReporter) SuiteWillBegin(report types.Report) {}
-func (n NoopReporter) WillRun(report types.SpecReport) {}
-func (n NoopReporter) DidRun(report types.SpecReport) {}
-func (n NoopReporter) SuiteDidEnd(report types.Report) {}
-func (n NoopReporter) EmitProgressReport(progressReport types.ProgressReport) {}
+func (n NoopReporter) SuiteWillBegin(report types.Report) {}
+func (n NoopReporter) WillRun(report types.SpecReport) {}
+func (n NoopReporter) DidRun(report types.SpecReport) {}
+func (n NoopReporter) SuiteDidEnd(report types.Report) {}
+func (n NoopReporter) EmitFailure(state types.SpecState, failure types.Failure) {}
+func (n NoopReporter) EmitProgressReport(progressReport types.ProgressReport) {}
+func (n NoopReporter) EmitReportEntry(entry types.ReportEntry) {}
+func (n NoopReporter) EmitSpecEvent(event types.SpecEvent) {}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go
index afc151b1..f33786a2 100644
--- a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go
+++ b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go
@@ -35,7 +35,7 @@ func CurrentSpecReport() SpecReport {
 }

 /*
-  ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter
+ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter

 - ReportEntryVisibilityAlways: the default behavior - the ReportEntry is always emitted.
 - ReportEntryVisibilityFailureOrVerbose: the ReportEntry is only emitted if the spec fails or if the tests are run with -v (similar to GinkgoWriters behavior).
@@ -50,9 +50,9 @@ const ReportEntryVisibilityAlways, ReportEntryVisibilityFailureOrVerbose, Report

 /*
 AddReportEntry generates and adds a new ReportEntry to the current spec's SpecReport.
 It can take any of the following arguments:
-  - A single arbitrary object to attach as the Value of the ReportEntry. This object will be included in any generated reports and will be emitted to the console when the report is emitted.
-  - A ReportEntryVisibility enum to control the visibility of the ReportEntry
-  - An Offset or CodeLocation decoration to control the reported location of the ReportEntry
+  - A single arbitrary object to attach as the Value of the ReportEntry. This object will be included in any generated reports and will be emitted to the console when the report is emitted.
+  - A ReportEntryVisibility enum to control the visibility of the ReportEntry
+  - An Offset or CodeLocation decoration to control the reported location of the ReportEntry

 If the Value object implements `fmt.Stringer`, it's `String()` representation is used when emitting to the console.
@@ -100,6 +100,25 @@ func ReportAfterEach(body func(SpecReport), args ...interface{}) bool {
 	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterEach, "", combinedArgs...))
 }

+/*
+ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function that receives a suite Report.
+
+They are called at the beginning of the suite, before any specs have run and any BeforeSuite or SynchronizedBeforeSuite nodes, and are passed in the initial report for the suite.
+ReportBeforeSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node)
+
+# When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportBeforeSuite
+
+You cannot nest any other Ginkgo nodes within a ReportBeforeSuite node's closure.
+You can learn more about ReportBeforeSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+
+You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports
+*/
+func ReportBeforeSuite(body func(Report), args ...interface{}) bool {
+	combinedArgs := []interface{}{body}
+	combinedArgs = append(combinedArgs, args...)
+	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...))
+}
+
 /*
 ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes take a function that receives a suite Report.
@@ -113,6 +132,7 @@ In addition to using ReportAfterSuite to programmatically generate suite reports

 You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure.
 You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+
 You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports
 */
 func ReportAfterSuite(text string, body func(Report), args ...interface{}) bool {
diff --git a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go
index 68367446..ac9b7abb 100644
--- a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go
+++ b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go
@@ -13,7 +13,7 @@ import (
 /*
 The EntryDescription decorator allows you to pass a format string to DescribeTable() and Entry(). This format string is used to generate entry names via:

-  fmt.Sprintf(formatString, parameters...)
+	fmt.Sprintf(formatString, parameters...)

 where parameters are the parameters passed into the entry.
@@ -32,19 +32,20 @@
 DescribeTable describes a table-driven spec.

 For example:

-    DescribeTable("a simple table",
-        func(x int, y int, expected bool) {
-            Ω(x > y).Should(Equal(expected))
-        },
-        Entry("x > y", 1, 0, true),
-        Entry("x == y", 0, 0, false),
-        Entry("x < y", 0, 1, false),
-    )
+	DescribeTable("a simple table",
+	    func(x int, y int, expected bool) {
+	        Ω(x > y).Should(Equal(expected))
+	    },
+	    Entry("x > y", 1, 0, true),
+	    Entry("x == y", 0, 0, false),
+	    Entry("x < y", 0, 1, false),
+	)

 You can learn more about DescribeTable here: https://onsi.github.io/ginkgo/#table-specs
 And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns
 */
 func DescribeTable(description string, args ...interface{}) bool {
+	GinkgoHelper()
 	generateTable(description, args...)
 	return true
 }
@@ -53,6 +54,7 @@ func DescribeTable(description string, args ...interface{}) bool {
 You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`.
 */
 func FDescribeTable(description string, args ...interface{}) bool {
+	GinkgoHelper()
 	args = append(args, internal.Focus)
 	generateTable(description, args...)
 	return true
@@ -62,6 +64,7 @@ func FDescribeTable(description string, args ...interface{}) bool {
 You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`.
 */
 func PDescribeTable(description string, args ...interface{}) bool {
+	GinkgoHelper()
 	args = append(args, internal.Pending)
 	generateTable(description, args...)
 	return true
@@ -95,26 +98,29 @@ If you want to generate interruptible specs simply write a Table function that a
 You can learn more about Entry here: https://onsi.github.io/ginkgo/#table-specs
 */
 func Entry(description interface{}, args ...interface{}) TableEntry {
+	GinkgoHelper()
 	decorations, parameters := internal.PartitionDecorations(args...)
-	return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)}
+	return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)}
 }

 /*
 You can focus a particular entry with FEntry. This is equivalent to FIt.
 */
 func FEntry(description interface{}, args ...interface{}) TableEntry {
+	GinkgoHelper()
 	decorations, parameters := internal.PartitionDecorations(args...)
 	decorations = append(decorations, internal.Focus)
-	return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)}
+	return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)}
 }

 /*
 You can mark a particular entry as pending with PEntry. This is equivalent to PIt.
 */
 func PEntry(description interface{}, args ...interface{}) TableEntry {
+	GinkgoHelper()
 	decorations, parameters := internal.PartitionDecorations(args...)
 	decorations = append(decorations, internal.Pending)
-	return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)}
+	return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)}
 }

 /*
@@ -126,7 +132,8 @@ var contextType = reflect.TypeOf(new(context.Context)).Elem()
 var specContextType = reflect.TypeOf(new(SpecContext)).Elem()

 func generateTable(description string, args ...interface{}) {
-	cl := types.NewCodeLocation(2)
+	GinkgoHelper()
+	cl := types.NewCodeLocation(0)
 	containerNodeArgs := []interface{}{cl}

 	entries := []TableEntry{}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/code_location.go b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go
index e4e9e38c..9cd57681 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/code_location.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go
@@ -1,4 +1,5 @@
 package types
+
 import (
 	"fmt"
 	"os"
@@ -6,6 +7,7 @@ import (
 	"runtime"
 	"runtime/debug"
 	"strings"
+	"sync"
 )

 type CodeLocation struct {
@@ -37,6 +39,73 @@ func (codeLocation CodeLocation) ContentsOfLine() string {
 	return lines[codeLocation.LineNumber-1]
 }

+type codeLocationLocator struct {
+	pcs map[uintptr]bool
+	helpers map[string]bool
+	lock *sync.Mutex
+}
+
+func (c *codeLocationLocator) addHelper(pc uintptr) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	if c.pcs[pc] {
+		return
+	}
+	c.lock.Unlock()
+	f := runtime.FuncForPC(pc)
+	c.lock.Lock()
+	if f == nil {
+		return
+	}
+	c.helpers[f.Name()] = true
+	c.pcs[pc] = true
+}
+
+func (c *codeLocationLocator) hasHelper(name string) bool {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	return c.helpers[name]
+}
+
+func (c *codeLocationLocator) getCodeLocation(skip int) CodeLocation {
+	pc := make([]uintptr, 40)
+	n := runtime.Callers(skip+2, pc)
+	if n == 0 {
+		return CodeLocation{}
+	}
+	pc = pc[:n]
+	frames := runtime.CallersFrames(pc)
+	for {
+		frame, more := frames.Next()
+		if !c.hasHelper(frame.Function) {
+			return CodeLocation{FileName: frame.File, LineNumber: frame.Line}
+		}
+		if !more {
+			break
+		}
+	}
+	return CodeLocation{}
+}
+
+var clLocator = &codeLocationLocator{
+	pcs: map[uintptr]bool{},
+	helpers: map[string]bool{},
+	lock: &sync.Mutex{},
+}
+
+// MarkAsHelper is used by GinkgoHelper to mark the caller (appropriately offset by skip) as a helper. You can use this directly if you need to provide an optional `skip` to mark functions further up the call stack as helpers.
+func MarkAsHelper(optionalSkip ...int) {
+	skip := 1
+	if len(optionalSkip) > 0 {
+		skip += optionalSkip[0]
+	}
+	pc, _, _, ok := runtime.Caller(skip)
+	if ok {
+		clLocator.addHelper(pc)
+	}
+}
+
 func NewCustomCodeLocation(message string) CodeLocation {
 	return CodeLocation{
 		CustomMessage: message,
@@ -44,14 +113,13 @@ func NewCustomCodeLocation(message string) CodeLocation {
 }

 func NewCodeLocation(skip int) CodeLocation {
-	_, file, line, _ := runtime.Caller(skip + 1)
-	return CodeLocation{FileName: file, LineNumber: line}
+	return clLocator.getCodeLocation(skip + 1)
 }

 func NewCodeLocationWithStackTrace(skip int) CodeLocation {
-	_, file, line, _ := runtime.Caller(skip + 1)
-	stackTrace := PruneStack(string(debug.Stack()), skip+1)
-	return CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace}
+	cl := clLocator.getCodeLocation(skip + 1)
+	cl.FullStackTrace = PruneStack(string(debug.Stack()), skip+1)
+	return cl
 }

 // PruneStack removes references to functions that are internal to Ginkgo
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go
index f016c5c1..1efd77d3 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/config.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go
@@ -26,11 +26,11 @@ type SuiteConfig struct {
 	FailOnPending bool
 	FailFast bool
 	FlakeAttempts int
-	EmitSpecProgress bool
 	DryRun bool
 	PollProgressAfter time.Duration
 	PollProgressInterval time.Duration
 	Timeout time.Duration
+	EmitSpecProgress bool // this is deprecated but its removal is causing compile issues for some users that were setting it manually
 	OutputInterceptorMode string
 	SourceRoots []string
 	GracePeriod time.Duration
@@ -81,13 +81,12 @@ func (vl VerbosityLevel) LT(comp VerbosityLevel) bool {

 // Configuration for Ginkgo's reporter
 type ReporterConfig struct {
-	NoColor bool
-	SlowSpecThreshold time.Duration
-	Succinct bool
-	Verbose bool
-	VeryVerbose bool
-	FullTrace bool
-	AlwaysEmitGinkgoWriter bool
+	NoColor bool
+	Succinct bool
+	Verbose bool
+	VeryVerbose bool
+	FullTrace bool
+	ShowNodeEvents bool

 	JSONReport string
 	JUnitReport string
@@ -110,9 +109,7 @@ func (rc ReporterConfig) WillGenerateReport() bool {
 }

 func NewDefaultReporterConfig() ReporterConfig {
-	return ReporterConfig{
-		SlowSpecThreshold: 5 * time.Second,
-	}
+	return ReporterConfig{}
 }

 // Configuration for the Ginkgo CLI
@@ -235,6 +232,9 @@ type deprecatedConfig struct {
 	SlowSpecThresholdWithFLoatUnits float64
 	Stream bool
 	Notify bool
+	EmitSpecProgress bool
+	SlowSpecThreshold time.Duration
+	AlwaysEmitGinkgoWriter bool
 }

 // Flags
@@ -275,8 +275,6 @@ var SuiteConfigFlags = GinkgoFlags{
 	{KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags",
 		Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."},
-	{KeyPath: "S.EmitSpecProgress", Name: "progress", SectionKey: "debug",
-		Usage: "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter."},
 	{KeyPath: "S.PollProgressAfter", Name: "poll-progress-after", SectionKey: "debug", UsageDefaultValue: "0",
 		Usage: "Emit node progress reports periodically if node hasn't completed after this duration."},
 	{KeyPath: "S.PollProgressInterval", Name: "poll-progress-interval", SectionKey: "debug", UsageDefaultValue: "10s",
@@ -303,6 +301,8 @@ var SuiteConfigFlags = GinkgoFlags{
 	{KeyPath: "D.RegexScansFilePath", DeprecatedName: "regexScansFilePath", DeprecatedDocLink: "removed--regexscansfilepath", DeprecatedVersion: "2.0.0"},
 	{KeyPath: "D.DebugParallel", DeprecatedName: "debug", DeprecatedDocLink: "removed--debug", DeprecatedVersion: "2.0.0"},
+	{KeyPath: "D.EmitSpecProgress", DeprecatedName: "progress", SectionKey: "debug",
+		DeprecatedVersion: "2.5.0", Usage: ". The functionality provided by --progress was confusing and is no longer needed. Use --show-node-events instead to see node entry and exit events included in the timeline of failed and verbose specs. Or you can run with -vv to always see all node events. Lastly, --poll-progress-after and the PollProgressAfter decorator now provide a better mechanism for debugging specs that tend to get stuck."},
 }

 // ParallelConfigFlags provides flags for the Ginkgo test process (not the CLI)
@@ -319,8 +319,6 @@ var ParallelConfigFlags = GinkgoFlags{
 var ReporterConfigFlags = GinkgoFlags{
 	{KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags",
 		Usage: "If set, suppress color output in default reporter."},
-	{KeyPath: "R.SlowSpecThreshold", Name: "slow-spec-threshold", SectionKey: "output", UsageArgument: "duration", UsageDefaultValue: "5s",
-		Usage: "Specs that take longer to run than this threshold are flagged as slow by the default reporter."},
 	{KeyPath: "R.Verbose", Name: "v", SectionKey: "output",
 		Usage: "If set, emits more output including GinkgoWriter contents."},
 	{KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output",
@@ -329,8 +327,8 @@ var ReporterConfigFlags = GinkgoFlags{
 		Usage: "If set, default reporter prints out a very succinct report"},
 	{KeyPath: "R.FullTrace", Name: "trace", SectionKey: "output",
 		Usage: "If set, default reporter prints out the full stack trace when a failure occurs"},
-	{KeyPath: "R.AlwaysEmitGinkgoWriter", Name: "always-emit-ginkgo-writer", SectionKey: "output", DeprecatedName: "reportPassed", DeprecatedDocLink: "renamed--reportpassed",
-		Usage: "If set, default reporter prints out captured output of passed tests."},
+	{KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output",
+		Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"},

 	{KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output",
 		Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."},
@@ -343,6 +341,8 @@ var ReporterConfigFlags = GinkgoFlags{
 		Usage: "use --slow-spec-threshold instead and pass in a duration string (e.g. '5s', not '5.0')"},
 	{KeyPath: "D.NoisyPendings", DeprecatedName: "noisyPendings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"},
 	{KeyPath: "D.NoisySkippings", DeprecatedName: "noisySkippings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"},
+	{KeyPath: "D.SlowSpecThreshold", DeprecatedName: "slow-spec-threshold", SectionKey: "output", Usage: "--slow-spec-threshold has been deprecated and will be removed in a future version of Ginkgo. This feature has proved to be more noisy than useful. You can use --poll-progress-after, instead, to get more actionable feedback about potentially slow specs and understand where they might be getting stuck.", DeprecatedVersion: "2.5.0"},
+	{KeyPath: "D.AlwaysEmitGinkgoWriter", DeprecatedName: "always-emit-ginkgo-writer", SectionKey: "output", Usage: " - use -v instead, or one of Ginkgo's machine-readable report formats to get GinkgoWriter output for passing specs."},
 }

 // BuildTestSuiteFlagSet attaches to the CommandLine flagset and provides flags for the Ginkgo test process
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go b/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go
index 2948dfa0..f267bdef 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go
@@ -83,6 +83,13 @@ func (d deprecations) Nodot() Deprecation {
 	}
 }

+func (d deprecations) SuppressProgressReporting() Deprecation {
+	return Deprecation{
+		Message: "Improvements to how reporters emit timeline information means that SuppressProgressReporting is no longer necessary and has been deprecated.",
+		Version: "2.5.0",
+	}
+}
+
 type DeprecationTracker struct {
 	deprecations map[Deprecation][]CodeLocation
 	lock *sync.Mutex
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/vendor/github.com/onsi/ginkgo/v2/types/errors.go
index b7ed5a21..1e0dbfd9 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/errors.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/errors.go
@@ -108,8 +108,8 @@ Please ensure all assertions are inside leaf nodes such as {{bold}}BeforeEach{{/

 func (g ginkgoErrors) SuiteNodeInNestedContext(nodeType NodeType, cl CodeLocation) error {
 	docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite"
-	if nodeType.Is(NodeTypeReportAfterSuite) {
-		docLink = "reporting-nodes---reportaftersuite"
+	if nodeType.Is(NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite) {
+		docLink = "reporting-nodes---reportbeforesuite-and-reportaftersuite"
 	}

 	return GinkgoError{
@@ -125,8 +125,8 @@ func (g ginkgoErrors) SuiteNodeInNestedContext(nodeType NodeType, cl CodeLocatio

 func (g ginkgoErrors) SuiteNodeDuringRunPhase(nodeType NodeType, cl CodeLocation) error {
 	docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite"
-	if nodeType.Is(NodeTypeReportAfterSuite) {
-		docLink = "reporting-nodes---reportaftersuite"
+	if nodeType.Is(NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite) {
+		docLink = "reporting-nodes---reportbeforesuite-and-reportaftersuite"
 	}

 	return GinkgoError{
@@ -298,6 +298,15 @@ func (g ginkgoErrors) SetupNodeNotInOrderedContainer(cl CodeLocation, nodeType N
 	}
 }

+func (g ginkgoErrors) InvalidContinueOnFailureDecoration(cl CodeLocation) error {
+	return GinkgoError{
+		Heading: "ContinueOnFailure not decorating an outermost Ordered Container",
+		Message: "ContinueOnFailure can only decorate an Ordered container, and this Ordered container must be the outermost Ordered container.",
+		CodeLocation: cl,
+		DocLink: "ordered-containers",
+	}
+}
+
 /* DeferCleanup errors */
 func (g ginkgoErrors) DeferCleanupInvalidFunction(cl CodeLocation) error {
 	return GinkgoError{
@@ -320,7 +329,7 @@ func (g ginkgoErrors) PushingCleanupNodeDuringTreeConstruction(cl CodeLocation)

 func (g ginkgoErrors) PushingCleanupInReportingNode(cl CodeLocation, nodeType NodeType) error {
 	return GinkgoError{
 		Heading: fmt.Sprintf("DeferCleanup cannot be called in %s", nodeType),
-		Message: "Please inline your cleanup code - Ginkgo won't run cleanup code after a ReportAfterEach or ReportAfterSuite.",
+		Message: "Please inline your cleanup code - Ginkgo won't run cleanup code after a Reporting node.",
 		CodeLocation: cl,
 		DocLink: "cleaning-up-our-cleanup-code-defercleanup",
 	}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
index 0403f9e6..b0d3b651 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
@@ -272,12 +272,23 @@ func tokenize(input string) func() (*treeNode, error) {
 	}
 }

+func MustParseLabelFilter(input string) LabelFilter {
+	filter, err := ParseLabelFilter(input)
+	if err != nil {
+		panic(err)
+	}
+	return filter
+}
+
 func ParseLabelFilter(input string) (LabelFilter, error) {
 	if DEBUG_LABEL_FILTER_PARSING {
 		fmt.Println("\n==============")
 		fmt.Println("Input: ", input)
 		fmt.Print("Tokens: ")
 	}
+	if input == "" {
+		return func(_ []string) bool { return true }, nil
+	}
 	nextToken := tokenize(input)
 	root := &treeNode{token: lfTokenRoot}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go
index 798bedc0..7b1524b5 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go
@@ -6,8 +6,8 @@ import (
 	"time"
 )

-//ReportEntryValue wraps a report entry's value ensuring it can be encoded and decoded safely into reports
-//and across the network connection when running in parallel
+// ReportEntryValue wraps a report entry's value ensuring it can be encoded and decoded safely into reports
+// and across the network connection when running in parallel
 type ReportEntryValue struct {
 	raw interface{} //unexported to prevent gob from freaking out about unregistered structs
 	AsJSON string
@@ -85,10 +85,12 @@ func (rev *ReportEntryValue) GobDecode(data []byte) error {
 type ReportEntry struct {
 	// Visibility captures the visibility policy for this ReportEntry
 	Visibility ReportEntryVisibility
-	// Time captures the time the AddReportEntry was called
-	Time time.Time
 	// Location captures the location of the AddReportEntry call
 	Location CodeLocation
+
+	Time time.Time //need this for backwards compatibility
+	TimelineLocation TimelineLocation
+
 	// Name captures the name of this report
 	Name string
 	// Value captures the (optional) object passed into AddReportEntry - this can be
@@ -120,7 +122,9 @@ func (entry ReportEntry) GetRawValue() interface{} {
 	return entry.Value.GetRawValue()
 }
-
+func (entry ReportEntry) GetTimelineLocation() TimelineLocation {
+	return entry.TimelineLocation
+}

 type ReportEntries []ReportEntry
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/types.go b/vendor/github.com/onsi/ginkgo/v2/types/types.go
index 9fc4425f..d048a8ad 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/types.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/types.go
@@ -2,6 +2,8 @@ package types

 import (
"encoding/json" + "fmt" + "sort" "strings" "time" ) @@ -56,19 +58,20 @@ type Report struct { SuiteConfig SuiteConfig //SpecReports is a list of all SpecReports generated by this test run + //It is empty when the SuiteReport is provided to ReportBeforeSuite SpecReports SpecReports } -//PreRunStats contains a set of stats captured before the test run begins. This is primarily used -//by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs) -//and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters. +// PreRunStats contains a set of stats captured before the test run begins. This is primarily used +// by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs) +// and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters. type PreRunStats struct { TotalSpecs int SpecsThatWillRun int } -//Add is used by Ginkgo's parallel aggregation mechanisms to combine test run reports form individual parallel processes -//to form a complete final report. +// Add is used by Ginkgo's parallel aggregation mechanisms to combine test run reports form individual parallel processes +// to form a complete final report. func (report Report) Add(other Report) Report { report.SuiteSucceeded = report.SuiteSucceeded && other.SuiteSucceeded @@ -147,6 +150,9 @@ type SpecReport struct { // ParallelProcess captures the parallel process that this spec ran on ParallelProcess int + // RunningInParallel captures whether this spec is part of a suite that ran in parallel + RunningInParallel bool + //Failure is populated if a spec has failed, panicked, been interrupted, or skipped by the user (e.g. calling Skip()) //It includes detailed information about the Failure Failure Failure @@ -178,6 +184,9 @@ type SpecReport struct { // AdditionalFailures contains any failures that occurred after the initial spec failure. These typically occur in cleanup nodes after the initial failure and are only emitted when running in verbose mode. 
AdditionalFailures []AdditionalFailure + + // SpecEvents capture additional events that occur during the spec run + SpecEvents SpecEvents } func (report SpecReport) MarshalJSON() ([]byte, error) { @@ -204,6 +213,7 @@ func (report SpecReport) MarshalJSON() ([]byte, error) { ReportEntries ReportEntries `json:",omitempty"` ProgressReports []ProgressReport `json:",omitempty"` AdditionalFailures []AdditionalFailure `json:",omitempty"` + SpecEvents SpecEvents `json:",omitempty"` }{ ContainerHierarchyTexts: report.ContainerHierarchyTexts, ContainerHierarchyLocations: report.ContainerHierarchyLocations, @@ -238,6 +248,9 @@ func (report SpecReport) MarshalJSON() ([]byte, error) { if len(report.AdditionalFailures) > 0 { out.AdditionalFailures = report.AdditionalFailures } + if len(report.SpecEvents) > 0 { + out.SpecEvents = report.SpecEvents + } return json.Marshal(out) } @@ -255,13 +268,13 @@ func (report SpecReport) CombinedOutput() string { return report.CapturedStdOutErr + "\n" + report.CapturedGinkgoWriterOutput } -//Failed returns true if report.State is one of the SpecStateFailureStates +// Failed returns true if report.State is one of the SpecStateFailureStates // (SpecStateFailed, SpecStatePanicked, SpecStateinterrupted, SpecStateAborted) func (report SpecReport) Failed() bool { return report.State.Is(SpecStateFailureStates) } -//FullText returns a concatenation of all the report.ContainerHierarchyTexts and report.LeafNodeText +// FullText returns a concatenation of all the report.ContainerHierarchyTexts and report.LeafNodeText func (report SpecReport) FullText() string { texts := []string{} texts = append(texts, report.ContainerHierarchyTexts...) @@ -271,7 +284,7 @@ func (report SpecReport) FullText() string { return strings.Join(texts, " ") } -//Labels returns a deduped set of all the spec's Labels. +// Labels returns a deduped set of all the spec's Labels. 
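// [Editor's aside, illustrative only - not part of the upstream patch] The deduped labels returned
// by the Labels accessor below pair naturally with the MustParseLabelFilter helper added earlier in
// label_filter.go. A minimal, hypothetical sketch (the query string is an assumption):
//
//	filter := types.MustParseLabelFilter("integration && !slow")
//	matched := filter(report.Labels()) // true when the spec's labels satisfy the query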
func (report SpecReport) Labels() []string { out := []string{} seen := map[string]bool{} @@ -293,7 +306,7 @@ func (report SpecReport) Labels() []string { return out } -//MatchesLabelFilter returns true if the spec satisfies the passed in label filter query +// MatchesLabelFilter returns true if the spec satisfies the passed in label filter query func (report SpecReport) MatchesLabelFilter(query string) (bool, error) { filter, err := ParseLabelFilter(query) if err != nil { @@ -302,29 +315,54 @@ func (report SpecReport) MatchesLabelFilter(query string) (bool, error) { return filter(report.Labels()), nil } -//FileName() returns the name of the file containing the spec +// FileName() returns the name of the file containing the spec func (report SpecReport) FileName() string { return report.LeafNodeLocation.FileName } -//LineNumber() returns the line number of the leaf node +// LineNumber() returns the line number of the leaf node func (report SpecReport) LineNumber() int { return report.LeafNodeLocation.LineNumber } -//FailureMessage() returns the failure message (or empty string if the test hasn't failed) +// FailureMessage() returns the failure message (or empty string if the test hasn't failed) func (report SpecReport) FailureMessage() string { return report.Failure.Message } -//FailureLocation() returns the location of the failure (or an empty CodeLocation if the test hasn't failed) +// FailureLocation() returns the location of the failure (or an empty CodeLocation if the test hasn't failed) func (report SpecReport) FailureLocation() CodeLocation { return report.Failure.Location } +// Timeline() returns a timeline view of the report +func (report SpecReport) Timeline() Timeline { + timeline := Timeline{} + if !report.Failure.IsZero() { + timeline = append(timeline, report.Failure) + if report.Failure.AdditionalFailure != nil { + timeline = append(timeline, *(report.Failure.AdditionalFailure)) + } + } + for _, additionalFailure := range report.AdditionalFailures { + timeline = append(timeline, additionalFailure) + } + for _, reportEntry := range report.ReportEntries { + timeline = append(timeline, reportEntry) + } + for _, progressReport := range report.ProgressReports { + timeline = append(timeline, progressReport) + } + for _, specEvent := range report.SpecEvents { + timeline = append(timeline, specEvent) + } + sort.Sort(timeline) + return timeline +} + type SpecReports []SpecReport -//WithLeafNodeType returns the subset of SpecReports with LeafNodeType matching one of the requested NodeTypes +// WithLeafNodeType returns the subset of SpecReports with LeafNodeType matching one of the requested NodeTypes func (reports SpecReports) WithLeafNodeType(nodeTypes NodeType) SpecReports { count := 0 for i := range reports { @@ -344,7 +382,7 @@ func (reports SpecReports) WithLeafNodeType(nodeTypes NodeType) SpecReports { return out } -//WithState returns the subset of SpecReports with State matching one of the requested SpecStates +// WithState returns the subset of SpecReports with State matching one of the requested SpecStates func (reports SpecReports) WithState(states SpecState) SpecReports { count := 0 for i := range reports { @@ -363,7 +401,7 @@ func (reports SpecReports) WithState(states SpecState) SpecReports { return out } -//CountWithState returns the number of SpecReports with State matching one of the requested SpecStates +// CountWithState returns the number of SpecReports with State matching one of the requested SpecStates func (reports SpecReports) CountWithState(states SpecState) 
 int {
 	n := 0
 	for i := range reports {
@@ -374,7 +412,7 @@ func (reports SpecReports) CountWithState(states SpecState) int {
 	return n
 }

-//If the Spec passes, CountOfFlakedSpecs returns the number of SpecReports that failed after multiple attempts.
+// If the Spec passes, CountOfFlakedSpecs returns the number of SpecReports that failed after multiple attempts.
 func (reports SpecReports) CountOfFlakedSpecs() int {
 	n := 0
 	for i := range reports {
@@ -385,7 +423,7 @@ func (reports SpecReports) CountOfFlakedSpecs() int {
 	return n
 }

-//If the Spec fails, CountOfRepeatedSpecs returns the number of SpecReports that passed after multiple attempts
+// If the Spec fails, CountOfRepeatedSpecs returns the number of SpecReports that passed after multiple attempts
 func (reports SpecReports) CountOfRepeatedSpecs() int {
 	n := 0
 	for i := range reports {
@@ -396,6 +434,53 @@ func (reports SpecReports) CountOfRepeatedSpecs() int {
 	return n
 }

+// TimelineLocation captures the location of an event in the spec's timeline
+type TimelineLocation struct {
+	//Offset is the offset (in bytes) of the event relative to the GinkgoWriter stream
+	Offset int `json:",omitempty"`
+
+	//Order is the order of the event with respect to other events. The absolute value of Order
+	//is irrelevant. All that matters is that an event with a lower Order occurs before an event with a higher Order
+	Order int `json:",omitempty"`
+
+	Time time.Time
+}
+
+// TimelineEvent represents an event on the timeline
+// consumers of Timeline will need to check the concrete type of each entry to determine how to handle it
+type TimelineEvent interface {
+	GetTimelineLocation() TimelineLocation
+}
+
+type Timeline []TimelineEvent
+
+func (t Timeline) Len() int { return len(t) }
+func (t Timeline) Less(i, j int) bool {
+	return t[i].GetTimelineLocation().Order < t[j].GetTimelineLocation().Order
+}
+func (t Timeline) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
+func (t Timeline) WithoutHiddenReportEntries() Timeline {
+	out := Timeline{}
+	for _, event := range t {
+		if reportEntry, isReportEntry := event.(ReportEntry); isReportEntry && reportEntry.Visibility == ReportEntryVisibilityNever {
+			continue
+		}
+		out = append(out, event)
+	}
+	return out
+}
+
+func (t Timeline) WithoutVeryVerboseSpecEvents() Timeline {
+	out := Timeline{}
+	for _, event := range t {
+		if specEvent, isSpecEvent := event.(SpecEvent); isSpecEvent && specEvent.IsOnlyVisibleAtVeryVerbose() {
+			continue
+		}
+		out = append(out, event)
+	}
+	return out
+}
+
 // Failure captures failure information for an individual test
 type Failure struct {
 	// Message - the failure message passed into Fail(...). When using a matcher library
@@ -408,6 +493,8 @@ type Failure struct {
 	// This CodeLocation will include a fully-populated StackTrace
 	Location CodeLocation

+	TimelineLocation TimelineLocation
+
 	// ForwardedPanic - if the failure represents a captured panic (i.e. Summary.State == SpecStatePanicked)
 	// then ForwardedPanic will be populated with a string representation of the captured panic.
 	ForwardedPanic string `json:",omitempty"`
@@ -420,19 +507,29 @@ type Failure struct {
 	// FailureNodeType will contain the NodeType of the node in which the failure occurred.
 	// FailureNodeLocation will contain the CodeLocation of the node in which the failure occurred.
 	// If populated, FailureNodeContainerIndex will be the index into SpecReport.ContainerHierarchyTexts and SpecReport.ContainerHierarchyLocations that represents the parent container of the node in which the failure occurred.
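// [Editor's aside, illustrative only - not part of the upstream patch] The Timeline API above is
// consumed by type-switching on each TimelineEvent's concrete type, as its doc comment notes. A
// minimal, hypothetical sketch from outside the types package (assumes a types.SpecReport named report):
//
//	for _, event := range report.Timeline().WithoutHiddenReportEntries() {
//		switch ev := event.(type) {
//		case types.Failure:
//			fmt.Println("failure:", ev.Message)
//		case types.ReportEntry:
//			fmt.Println("report entry:", ev.Name)
//		case types.SpecEvent:
//			fmt.Println("spec event:", ev.SpecEventType.String())
//		}
//	}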
- FailureNodeContext FailureNodeContext - FailureNodeType NodeType - FailureNodeLocation CodeLocation - FailureNodeContainerIndex int + FailureNodeContext FailureNodeContext `json:",omitempty"` + + FailureNodeType NodeType `json:",omitempty"` + + FailureNodeLocation CodeLocation `json:",omitempty"` + + FailureNodeContainerIndex int `json:",omitempty"` //ProgressReport is populated if the spec was interrupted or timed out - ProgressReport ProgressReport + ProgressReport ProgressReport `json:",omitempty"` + + //AdditionalFailure is non-nil if a follow-on failure occurred within the same node after the primary failure. This only happens when a node has timed out or been interrupted. In such cases the AdditionalFailure can include information about where/why the spec was stuck. + AdditionalFailure *AdditionalFailure `json:",omitempty"` } func (f Failure) IsZero() bool { return f.Message == "" && (f.Location == CodeLocation{}) } +func (f Failure) GetTimelineLocation() TimelineLocation { + return f.TimelineLocation +} + // FailureNodeContext captures the location context for the node containing the failing line of code type FailureNodeContext uint @@ -471,6 +568,10 @@ type AdditionalFailure struct { Failure Failure } +func (f AdditionalFailure) GetTimelineLocation() TimelineLocation { + return f.Failure.TimelineLocation +} + // SpecState captures the state of a spec // To determine if a given `state` represents a failure state, use `state.Is(SpecStateFailureStates)` type SpecState uint @@ -503,6 +604,9 @@ var ssEnumSupport = NewEnumSupport(map[uint]string{ func (ss SpecState) String() string { return ssEnumSupport.String(uint(ss)) } +func (ss SpecState) GomegaString() string { + return ssEnumSupport.String(uint(ss)) +} func (ss *SpecState) UnmarshalJSON(b []byte) error { out, err := ssEnumSupport.UnmarshJSON(b) *ss = SpecState(out) @@ -520,38 +624,40 @@ func (ss SpecState) Is(states SpecState) bool { // ProgressReport captures the progress of the current spec. 
It is, effectively, a structured Ginkgo-aware stack trace type ProgressReport struct { - Message string - ParallelProcess int - RunningInParallel bool + Message string `json:",omitempty"` + ParallelProcess int `json:",omitempty"` + RunningInParallel bool `json:",omitempty"` - Time time.Time + ContainerHierarchyTexts []string `json:",omitempty"` + LeafNodeText string `json:",omitempty"` + LeafNodeLocation CodeLocation `json:",omitempty"` + SpecStartTime time.Time `json:",omitempty"` - ContainerHierarchyTexts []string - LeafNodeText string - LeafNodeLocation CodeLocation - SpecStartTime time.Time + CurrentNodeType NodeType `json:",omitempty"` + CurrentNodeText string `json:",omitempty"` + CurrentNodeLocation CodeLocation `json:",omitempty"` + CurrentNodeStartTime time.Time `json:",omitempty"` - CurrentNodeType NodeType - CurrentNodeText string - CurrentNodeLocation CodeLocation - CurrentNodeStartTime time.Time + CurrentStepText string `json:",omitempty"` + CurrentStepLocation CodeLocation `json:",omitempty"` + CurrentStepStartTime time.Time `json:",omitempty"` - CurrentStepText string - CurrentStepLocation CodeLocation - CurrentStepStartTime time.Time + AdditionalReports []string `json:",omitempty"` - AdditionalReports []string + CapturedGinkgoWriterOutput string `json:",omitempty"` + TimelineLocation TimelineLocation `json:",omitempty"` - CapturedGinkgoWriterOutput string `json:",omitempty"` - GinkgoWriterOffset int - - Goroutines []Goroutine + Goroutines []Goroutine `json:",omitempty"` } func (pr ProgressReport) IsZero() bool { return pr.CurrentNodeType == NodeTypeInvalid } +func (pr ProgressReport) Time() time.Time { + return pr.TimelineLocation.Time +} + func (pr ProgressReport) SpecGoroutine() Goroutine { for _, goroutine := range pr.Goroutines { if goroutine.IsSpecGoroutine { @@ -589,6 +695,22 @@ func (pr ProgressReport) WithoutCapturedGinkgoWriterOutput() ProgressReport { return out } +func (pr ProgressReport) WithoutOtherGoroutines() ProgressReport { + out := pr + filteredGoroutines := []Goroutine{} + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine || goroutine.HasHighlights() { + filteredGoroutines = append(filteredGoroutines, goroutine) + } + } + out.Goroutines = filteredGoroutines + return out +} + +func (pr ProgressReport) GetTimelineLocation() TimelineLocation { + return pr.TimelineLocation +} + type Goroutine struct { ID uint64 State string @@ -643,6 +765,7 @@ const ( NodeTypeReportBeforeEach NodeTypeReportAfterEach + NodeTypeReportBeforeSuite NodeTypeReportAfterSuite NodeTypeCleanupInvalid @@ -652,9 +775,9 @@ const ( ) var NodeTypesForContainerAndIt = NodeTypeContainer | NodeTypeIt -var NodeTypesForSuiteLevelNodes = NodeTypeBeforeSuite | NodeTypeSynchronizedBeforeSuite | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeReportAfterSuite | NodeTypeCleanupAfterSuite +var NodeTypesForSuiteLevelNodes = NodeTypeBeforeSuite | NodeTypeSynchronizedBeforeSuite | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite | NodeTypeCleanupAfterSuite var NodeTypesAllowedDuringCleanupInterrupt = NodeTypeAfterEach | NodeTypeJustAfterEach | NodeTypeAfterAll | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeCleanupAfterEach | NodeTypeCleanupAfterAll | NodeTypeCleanupAfterSuite -var NodeTypesAllowedDuringReportInterrupt = NodeTypeReportBeforeEach | NodeTypeReportAfterEach | NodeTypeReportAfterSuite +var NodeTypesAllowedDuringReportInterrupt = NodeTypeReportBeforeEach | NodeTypeReportAfterEach | 
NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite

 var ntEnumSupport = NewEnumSupport(map[uint]string{
 	uint(NodeTypeInvalid):                 "INVALID NODE TYPE",
@@ -672,6 +795,7 @@ var ntEnumSupport = NewEnumSupport(map[uint]string{
 	uint(NodeTypeSynchronizedAfterSuite):  "SynchronizedAfterSuite",
 	uint(NodeTypeReportBeforeEach):        "ReportBeforeEach",
 	uint(NodeTypeReportAfterEach):         "ReportAfterEach",
+	uint(NodeTypeReportBeforeSuite):       "ReportBeforeSuite",
 	uint(NodeTypeReportAfterSuite):        "ReportAfterSuite",
 	uint(NodeTypeCleanupInvalid):          "DeferCleanup",
 	uint(NodeTypeCleanupAfterEach):        "DeferCleanup (Each)",
@@ -694,3 +818,99 @@ func (nt NodeType) MarshalJSON() ([]byte, error) {
 func (nt NodeType) Is(nodeTypes NodeType) bool {
 	return nt&nodeTypes != 0
 }
+
+/*
+SpecEvent captures a variety of events that can occur when specs run. See SpecEventType for the list of available events.
+*/
+type SpecEvent struct {
+	SpecEventType SpecEventType
+
+	CodeLocation     CodeLocation
+	TimelineLocation TimelineLocation
+
+	Message  string        `json:",omitempty"`
+	Duration time.Duration `json:",omitempty"`
+	NodeType NodeType      `json:",omitempty"`
+	Attempt  int           `json:",omitempty"`
+}
+
+func (se SpecEvent) GetTimelineLocation() TimelineLocation {
+	return se.TimelineLocation
+}
+
+func (se SpecEvent) IsOnlyVisibleAtVeryVerbose() bool {
+	return se.SpecEventType.Is(SpecEventByEnd | SpecEventNodeStart | SpecEventNodeEnd)
+}
+
+func (se SpecEvent) GomegaString() string {
+	out := &strings.Builder{}
+	out.WriteString("[" + se.SpecEventType.String() + " SpecEvent] ")
+	if se.Message != "" {
+		out.WriteString("Message=")
+		out.WriteString(`"` + se.Message + `",`)
+	}
+	if se.Duration != 0 {
+		out.WriteString("Duration=" + se.Duration.String() + ",")
+	}
+	if se.NodeType != NodeTypeInvalid {
+		out.WriteString("NodeType=" + se.NodeType.String() + ",")
+	}
+	if se.Attempt != 0 {
+		out.WriteString(fmt.Sprintf("Attempt=%d", se.Attempt) + ",")
+	}
+	out.WriteString("CL=" + se.CodeLocation.String() + ",")
+	out.WriteString(fmt.Sprintf("TL.Offset=%d", se.TimelineLocation.Offset))
+
+	return out.String()
+}
+
+type SpecEvents []SpecEvent
+
+func (se SpecEvents) WithType(seType SpecEventType) SpecEvents {
+	out := SpecEvents{}
+	for _, event := range se {
+		if event.SpecEventType.Is(seType) {
+			out = append(out, event)
+		}
+	}
+	return out
+}
+
+type SpecEventType uint
+
+const (
+	SpecEventInvalid SpecEventType = 0
+
+	SpecEventByStart SpecEventType = 1 << iota
+	SpecEventByEnd
+	SpecEventNodeStart
+	SpecEventNodeEnd
+	SpecEventSpecRepeat
+	SpecEventSpecRetry
+)
+
+var seEnumSupport = NewEnumSupport(map[uint]string{
+	uint(SpecEventInvalid):    "INVALID SPEC EVENT",
+	uint(SpecEventByStart):    "By",
+	uint(SpecEventByEnd):      "By (End)",
+	uint(SpecEventNodeStart):  "Node",
+	uint(SpecEventNodeEnd):    "Node (End)",
+	uint(SpecEventSpecRepeat): "Repeat",
+	uint(SpecEventSpecRetry):  "Retry",
+})
+
+func (se SpecEventType) String() string {
+	return seEnumSupport.String(uint(se))
+}
+func (se *SpecEventType) UnmarshalJSON(b []byte) error {
+	out, err := seEnumSupport.UnmarshJSON(b)
+	*se = SpecEventType(out)
+	return err
+}
+func (se SpecEventType) MarshalJSON() ([]byte, error) {
+	return seEnumSupport.MarshJSON(uint(se))
+}
+
+func (se SpecEventType) Is(specEventTypes SpecEventType) bool {
+	return se&specEventTypes != 0
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go
index 7ba384a0..d11d0844 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/version.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go
@@ -1,3 +1,3 @@
 package types

-const VERSION = "2.4.0"
+const VERSION = "2.8.4"
diff --git a/vendor/github.com/onsi/gomega/.gitignore b/vendor/github.com/onsi/gomega/.gitignore
index 5f12ff05..52266eae 100644
--- a/vendor/github.com/onsi/gomega/.gitignore
+++ b/vendor/github.com/onsi/gomega/.gitignore
@@ -3,4 +3,5 @@
 .
 .idea
 gomega.iml
-TODO.md
\ No newline at end of file
+TODO.md
+.vscode
\ No newline at end of file
diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md
index e088dc07..99ea9407 100644
--- a/vendor/github.com/onsi/gomega/CHANGELOG.md
+++ b/vendor/github.com/onsi/gomega/CHANGELOG.md
@@ -1,3 +1,83 @@
+## 1.27.1
+
+### Maintenance
+
+- Bump golang.org/x/net from 0.6.0 to 0.7.0 (#640) [bc686cd]
+
+## 1.27.0
+
+### Features
+- Add HaveExactElements matcher (#634) [9d50783]
+- update Gomega docs to discuss GinkgoHelper() [be32774]
+
+### Maintenance
+- Bump github.com/onsi/ginkgo/v2 from 2.8.0 to 2.8.1 (#639) [296a68b]
+- Bump golang.org/x/net from 0.5.0 to 0.6.0 (#638) [c2b098b]
+- Bump github-pages from 227 to 228 in /docs (#636) [a9069ab]
+- test: update matrix for Go 1.20 (#635) [6bd25c8]
+- Bump github.com/onsi/ginkgo/v2 from 2.7.0 to 2.8.0 (#631) [5445f8b]
+- Bump webrick from 1.7.0 to 1.8.1 in /docs (#630) [03e93bb]
+- codeql: add ruby language (#626) [63c7d21]
+- dependabot: add bundler package-ecosystem for docs (#625) [d92f963]
+
+## 1.26.0
+
+### Features
+- When a polled function returns an error, keep track of the actual and report on the matcher state of the last non-errored actual [21f3090]
+- improve eventually failure message output [c530fb3]
+
+### Fixes
+- fix several documentation spelling issues [e2eff1f]
+
+
+## 1.25.0
+
+### Features
+- add `MustPassRepeatedly(int)` to asyncAssertion (#619) [4509f72]
+- compare unwrapped errors using DeepEqual (#617) [aaeaa5d]
+
+### Maintenance
+- Bump golang.org/x/net from 0.4.0 to 0.5.0 (#614) [c7cfea4]
+- Bump github.com/onsi/ginkgo/v2 from 2.6.1 to 2.7.0 (#615) [71b8adb]
+- Docs: Fix typo "MUltiple" -> "Multiple" (#616) [9351dda]
+- clean up go.sum [cd1dc1d]
+
+## 1.24.2
+
+### Fixes
+- Correctly handle assertion failure panics for eventually/consistently "g Gomega"s in a goroutine [78f1660]
+- docs:Fix typo "you an" -> "you can" (#607) [3187c1f]
+- fixes issue #600 (#606) [808d192]
+
+### Maintenance
+- Bump golang.org/x/net from 0.2.0 to 0.4.0 (#611) [6ebc0bf]
+- Bump nokogiri from 1.13.9 to 1.13.10 in /docs (#612) [258cfc8]
+- Bump github.com/onsi/ginkgo/v2 from 2.5.0 to 2.5.1 (#609) [e6c3eb9]
+
+## 1.24.1
+
+### Fixes
+- maintain backward compatibility for Eventually and Consistently's signatures [4c7df5e]
+- fix small typo (#601) [ea0ebe6]
+
+### Maintenance
+- Bump golang.org/x/net from 0.1.0 to 0.2.0 (#603) [1ba8372]
+- Bump github.com/onsi/ginkgo/v2 from 2.4.0 to 2.5.0 (#602) [f9426cb]
+- fix label-filter in test.yml [d795db6]
+- stop running flakey tests and rely on external network dependencies in CI [7133290]
+
+## 1.24.0
+
+### Features
+
+Introducing [gcustom](https://onsi.github.io/gomega/#gcustom-a-convenient-mechanism-for-buildling-custom-matchers) - a convenient mechanism for building custom matchers.
+
+This is an RC release for `gcustom`. The external API may be tweaked in response to feedback however it is expected to remain mostly stable.
+
+### Maintenance
+
+- Update BeComparableTo documentation [756eaa0]
+
 ## 1.23.0

 ### Features
diff --git a/vendor/github.com/onsi/gomega/RELEASING.md b/vendor/github.com/onsi/gomega/RELEASING.md
index 7153b9b9..9973fff4 100644
--- a/vendor/github.com/onsi/gomega/RELEASING.md
+++ b/vendor/github.com/onsi/gomega/RELEASING.md
@@ -5,7 +5,7 @@ A Gomega release is a tagged sha and a GitHub release. To cut a release:
   ```bash
   LAST_VERSION=$(git tag --sort=version:refname | tail -n1)
   CHANGES=$(git log --pretty=format:'- %s [%h]' HEAD...$LAST_VERSION)
-  echo -e "## NEXT\n\n$CHANGES\n\n### Features\n\n## Fixes\n\n## Maintenance\n\n$(cat CHANGELOG.md)" > CHANGELOG.md
+  echo -e "## NEXT\n\n$CHANGES\n\n### Features\n\n### Fixes\n\n### Maintenance\n\n$(cat CHANGELOG.md)" > CHANGELOG.md
   ```
   to update the changelog
 - Categorize the changes into
diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go
index e236a40f..4405c068 100644
--- a/vendor/github.com/onsi/gomega/gomega_dsl.go
+++ b/vendor/github.com/onsi/gomega/gomega_dsl.go
@@ -22,7 +22,7 @@ import (
 	"github.com/onsi/gomega/types"
 )

-const GOMEGA_VERSION = "1.23.0"
+const GOMEGA_VERSION = "1.27.1"

 const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler.
 If you're using Ginkgo then you probably forgot to put your assertion in an It().
@@ -360,6 +360,16 @@ You can also pass additional arugments to functions that take a Gomega. The onl
 		g.Expect(elements).To(ConsistOf(expected))
 	}).WithContext(ctx).WithArguments("/names", "Joe", "Jane", "Sam").Should(Succeed())

+You can ensure that you get a number of consecutive successful tries before succeeding using `MustPassRepeatedly(int)`. For example:
+
+	count := 0
+	Eventually(func() bool {
+		count++
+		return count > 2
+	}).MustPassRepeatedly(2).Should(BeTrue())
+	// Because we had to wait for 2 calls that returned true
+	Expect(count).To(Equal(4))
+
 Finally, in addition to passing timeouts and a context to Eventually you can be more explicit with Eventually's chaining configuration methods:

 	Eventually(..., "1s", "2s", ctx).Should(...)
@@ -368,9 +378,9 @@ is equivalent to

 	Eventually(...).WithTimeout(time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...)
 */
-func Eventually(args ...interface{}) AsyncAssertion {
+func Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
 	ensureDefaultGomegaIsConfigured()
-	return Default.Eventually(args...)
+	return Default.Eventually(actualOrCtx, args...)
 }

 // EventuallyWithOffset operates like Eventually but takes an additional
@@ -382,9 +392,9 @@ func Eventually(args ...interface{}) AsyncAssertion {
 // `EventuallyWithOffset` specifying a timeout interval (and an optional polling interval) are
 // the same as `Eventually(...).WithOffset(...).WithTimeout` or
 // `Eventually(...).WithOffset(...).WithTimeout(...).WithPolling`.
-func EventuallyWithOffset(offset int, args ...interface{}) AsyncAssertion {
+func EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
 	ensureDefaultGomegaIsConfigured()
-	return Default.EventuallyWithOffset(offset, args...)
+	return Default.EventuallyWithOffset(offset, actualOrCtx, args...)
 }

 /*
@@ -402,9 +412,9 @@ Consistently is useful in cases where you want to assert that something *does no

 This will block for 200 milliseconds and repeatedly check the channel and ensure nothing has been received.
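An editor's aside (illustrative only, not part of the upstream doc): with the signature change in this patch, both Eventually and Consistently accept a context.Context as their first argument, ahead of the value or function to poll. A minimal sketch, assuming a ctx and a hypothetical pollItems function are in scope:

	Consistently(ctx, func() []string { return pollItems() }).ShouldNot(ContainElement("oops"))

The first argument is only treated as the context when it is not the sole argument and the argument after it is not a parseable duration.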
*/ -func Consistently(args ...interface{}) AsyncAssertion { +func Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion { ensureDefaultGomegaIsConfigured() - return Default.Consistently(args...) + return Default.Consistently(actualOrCtx, args...) } // ConsistentlyWithOffset operates like Consistently but takes an additional @@ -413,9 +423,9 @@ func Consistently(args ...interface{}) AsyncAssertion { // // `ConsistentlyWithOffset` is the same as `Consistently(...).WithOffset` and // optional `WithTimeout` and `WithPolling`. -func ConsistentlyWithOffset(offset int, args ...interface{}) AsyncAssertion { +func ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion { ensureDefaultGomegaIsConfigured() - return Default.ConsistentlyWithOffset(offset, args...) + return Default.ConsistentlyWithOffset(offset, actualOrCtx, args...) } /* diff --git a/vendor/github.com/onsi/gomega/internal/async_assertion.go b/vendor/github.com/onsi/gomega/internal/async_assertion.go index c1e4a999..7f622696 100644 --- a/vendor/github.com/onsi/gomega/internal/async_assertion.go +++ b/vendor/github.com/onsi/gomega/internal/async_assertion.go @@ -2,6 +2,7 @@ package internal import ( "context" + "errors" "fmt" "reflect" "runtime" @@ -16,10 +17,37 @@ var errInterface = reflect.TypeOf((*error)(nil)).Elem() var gomegaType = reflect.TypeOf((*types.Gomega)(nil)).Elem() var contextType = reflect.TypeOf(new(context.Context)).Elem() +type formattedGomegaError interface { + FormattedGomegaError() string +} + +type asyncPolledActualError struct { + message string +} + +func (err *asyncPolledActualError) Error() string { + return err.message +} + +func (err *asyncPolledActualError) FormattedGomegaError() string { + return err.message +} + type contextWithAttachProgressReporter interface { AttachProgressReporter(func() string) func() } +type asyncGomegaHaltExecutionError struct{} + +func (a asyncGomegaHaltExecutionError) GinkgoRecoverShouldIgnoreThisPanic() {} +func (a asyncGomegaHaltExecutionError) Error() string { + return `An assertion has failed in a goroutine. You should call + + defer GinkgoRecover() + +at the top of the goroutine that caused this panic. 
This will allow Ginkgo and Gomega to correctly capture and manage this panic.` +} + type AsyncAssertionType uint const ( @@ -44,21 +72,23 @@ type AsyncAssertion struct { actual interface{} argsToForward []interface{} - timeoutInterval time.Duration - pollingInterval time.Duration - ctx context.Context - offset int - g *Gomega + timeoutInterval time.Duration + pollingInterval time.Duration + mustPassRepeatedly int + ctx context.Context + offset int + g *Gomega } -func NewAsyncAssertion(asyncType AsyncAssertionType, actualInput interface{}, g *Gomega, timeoutInterval time.Duration, pollingInterval time.Duration, ctx context.Context, offset int) *AsyncAssertion { +func NewAsyncAssertion(asyncType AsyncAssertionType, actualInput interface{}, g *Gomega, timeoutInterval time.Duration, pollingInterval time.Duration, mustPassRepeatedly int, ctx context.Context, offset int) *AsyncAssertion { out := &AsyncAssertion{ - asyncType: asyncType, - timeoutInterval: timeoutInterval, - pollingInterval: pollingInterval, - offset: offset, - ctx: ctx, - g: g, + asyncType: asyncType, + timeoutInterval: timeoutInterval, + pollingInterval: pollingInterval, + mustPassRepeatedly: mustPassRepeatedly, + offset: offset, + ctx: ctx, + g: g, } out.actual = actualInput @@ -104,6 +134,11 @@ func (assertion *AsyncAssertion) WithArguments(argsToForward ...interface{}) typ return assertion } +func (assertion *AsyncAssertion) MustPassRepeatedly(count int) types.AsyncAssertion { + assertion.mustPassRepeatedly = count + return assertion +} + func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { assertion.g.THelper() vetOptionalDescription("Asynchronous assertion", optionalDescription...) @@ -130,7 +165,9 @@ func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interfa func (assertion *AsyncAssertion) processReturnValues(values []reflect.Value) (interface{}, error) { if len(values) == 0 { - return nil, fmt.Errorf("No values were returned by the function passed to Gomega") + return nil, &asyncPolledActualError{ + message: fmt.Sprintf("The function passed to %s did not return any values", assertion.asyncType), + } } actual := values[0].Interface() @@ -153,10 +190,12 @@ func (assertion *AsyncAssertion) processReturnValues(values []reflect.Value) (in continue } if i == len(values)-2 && extraType.Implements(errInterface) { - err = fmt.Errorf("function returned error: %w", extra.(error)) + err = extra.(error) } if err == nil { - err = fmt.Errorf("Unexpected non-nil/non-zero return value at index %d:\n\t<%T>: %#v", i+1, extra, extra) + err = &asyncPolledActualError{ + message: fmt.Sprintf("The function passed to %s had an unexpected non-nil/non-zero return value at index %d:\n%s", assertion.asyncType, i+1, format.Object(extra, 1)), + } } } @@ -191,6 +230,13 @@ You can learn more at https://onsi.github.io/gomega/#eventually `, assertion.asyncType, t, t.NumIn(), numProvided, have, assertion.asyncType) } +func (assertion *AsyncAssertion) invalidMustPassRepeatedlyError(reason string) error { + return fmt.Errorf(`Invalid use of MustPassRepeatedly with %s %s + +You can learn more at https://onsi.github.io/gomega/#eventually +`, assertion.asyncType, reason) +} + func (assertion *AsyncAssertion) buildActualPoller() (func() (interface{}, error), error) { if !assertion.actualIsFunc { return func() (interface{}, error) { return assertion.actual, nil }, nil @@ -228,8 +274,11 @@ func (assertion *AsyncAssertion) buildActualPoller() (func() (interface{}, error skip = 
callerSkip[0] } _, file, line, _ := runtime.Caller(skip + 1) - assertionFailure = fmt.Errorf("Assertion in callback at %s:%d failed:\n%s", file, line, message) - panic("stop execution") + assertionFailure = &asyncPolledActualError{ + message: fmt.Sprintf("The function passed to %s failed at %s:%d with:\n%s", assertion.asyncType, file, line, message), + } + // we throw an asyncGomegaHaltExecutionError so that defer GinkgoRecover() can catch this error if the user makes an assertion in a goroutine + panic(asyncGomegaHaltExecutionError{}) }))) } if takesContext { @@ -245,6 +294,13 @@ func (assertion *AsyncAssertion) buildActualPoller() (func() (interface{}, error return nil, assertion.argumentMismatchError(actualType, len(inValues)) } + if assertion.mustPassRepeatedly != 1 && assertion.asyncType != AsyncAssertionTypeEventually { + return nil, assertion.invalidMustPassRepeatedlyError("it can only be used with Eventually") + } + if assertion.mustPassRepeatedly < 1 { + return nil, assertion.invalidMustPassRepeatedlyError("parameter can't be < 1") + } + return func() (actual interface{}, err error) { var values []reflect.Value assertionFailure = nil @@ -326,22 +382,39 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch timeout := assertion.afterTimeout() lock := sync.Mutex{} - var matches bool - var err error + var matches, hasLastValidActual bool + var actual, lastValidActual interface{} + var actualErr, matcherErr error var oracleMatcherSaysStop bool assertion.g.THelper() - pollActual, err := assertion.buildActualPoller() - if err != nil { - assertion.g.Fail(err.Error(), 2+assertion.offset) + pollActual, buildActualPollerErr := assertion.buildActualPoller() + if buildActualPollerErr != nil { + assertion.g.Fail(buildActualPollerErr.Error(), 2+assertion.offset) return false } - value, err := pollActual() - if err == nil { - oracleMatcherSaysStop = assertion.matcherSaysStopTrying(matcher, value) - matches, err = assertion.pollMatcher(matcher, value) + actual, actualErr = pollActual() + if actualErr == nil { + lastValidActual = actual + hasLastValidActual = true + oracleMatcherSaysStop = assertion.matcherSaysStopTrying(matcher, actual) + matches, matcherErr = assertion.pollMatcher(matcher, actual) + } + + renderError := func(preamble string, err error) string { + message := "" + if pollingSignalErr, ok := AsPollingSignalError(err); ok { + message = err.Error() + for _, attachment := range pollingSignalErr.Attachments { + message += fmt.Sprintf("\n%s:\n", attachment.Description) + message += format.Object(attachment.Object, 1) + } + } else { + message = preamble + "\n" + err.Error() + "\n" + format.Object(err, 1) + } + return message } messageGenerator := func() string { @@ -349,23 +422,45 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch lock.Lock() defer lock.Unlock() message := "" - if err != nil { - if pollingSignalErr, ok := AsPollingSignalError(err); ok && pollingSignalErr.IsStopTrying() { - message = err.Error() - for _, attachment := range pollingSignalErr.Attachments { - message += fmt.Sprintf("\n%s:\n", attachment.Description) - message += format.Object(attachment.Object, 1) + + if actualErr == nil { + if matcherErr == nil { + if desiredMatch { + message += matcher.FailureMessage(actual) + } else { + message += matcher.NegatedFailureMessage(actual) } } else { - message = "Error: " + err.Error() + "\n" + format.Object(err, 1) + var fgErr formattedGomegaError + if errors.As(actualErr, &fgErr) { + message += 
fgErr.FormattedGomegaError() + "\n" + } else { + message += renderError(fmt.Sprintf("The matcher passed to %s returned the following error:", assertion.asyncType), matcherErr) + } } } else { - if desiredMatch { - message = matcher.FailureMessage(value) + var fgErr formattedGomegaError + if errors.As(actualErr, &fgErr) { + message += fgErr.FormattedGomegaError() + "\n" } else { - message = matcher.NegatedFailureMessage(value) + message += renderError(fmt.Sprintf("The function passed to %s returned the following error:", assertion.asyncType), actualErr) + } + if hasLastValidActual { + message += fmt.Sprintf("\nAt one point, however, the function did return successfully.\nYet, %s failed because", assertion.asyncType) + _, e := matcher.Match(lastValidActual) + if e != nil { + message += renderError(" the matcher returned the following error:", e) + } else { + message += " the matcher was not satisfied:\n" + if desiredMatch { + message += matcher.FailureMessage(lastValidActual) + } else { + message += matcher.NegatedFailureMessage(lastValidActual) + } + } } } + description := assertion.buildDescription(optionalDescription...) return fmt.Sprintf("%s%s", description, message) } @@ -384,30 +479,39 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch } } + // Used to count the number of times in a row a step passed + passedRepeatedlyCount := 0 for { var nextPoll <-chan time.Time = nil var isTryAgainAfterError = false - if pollingSignalErr, ok := AsPollingSignalError(err); ok { - if pollingSignalErr.IsStopTrying() { - fail("Told to stop trying") - return false - } - if pollingSignalErr.IsTryAgainAfter() { - nextPoll = time.After(pollingSignalErr.TryAgainDuration()) - isTryAgainAfterError = true + for _, err := range []error{actualErr, matcherErr} { + if pollingSignalErr, ok := AsPollingSignalError(err); ok { + if pollingSignalErr.IsStopTrying() { + fail("Told to stop trying") + return false + } + if pollingSignalErr.IsTryAgainAfter() { + nextPoll = time.After(pollingSignalErr.TryAgainDuration()) + isTryAgainAfterError = true + } } } - if err == nil && matches == desiredMatch { + if actualErr == nil && matcherErr == nil && matches == desiredMatch { if assertion.asyncType == AsyncAssertionTypeEventually { - return true + passedRepeatedlyCount += 1 + if passedRepeatedlyCount == assertion.mustPassRepeatedly { + return true + } } } else if !isTryAgainAfterError { if assertion.asyncType == AsyncAssertionTypeConsistently { fail("Failed") return false } + // Reset the consecutive pass count + passedRepeatedlyCount = 0 } if oracleMatcherSaysStop { @@ -425,15 +529,19 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch select { case <-nextPoll: - v, e := pollActual() + a, e := pollActual() lock.Lock() - value, err = v, e + actual, actualErr = a, e lock.Unlock() - if err == nil { - oracleMatcherSaysStop = assertion.matcherSaysStopTrying(matcher, value) - m, e := assertion.pollMatcher(matcher, value) + if actualErr == nil { + lock.Lock() + lastValidActual = actual + hasLastValidActual = true + lock.Unlock() + oracleMatcherSaysStop = assertion.matcherSaysStopTrying(matcher, actual) + m, e := assertion.pollMatcher(matcher, actual) lock.Lock() - matches, err = m, e + matches, matcherErr = m, e lock.Unlock() } case <-contextDone: diff --git a/vendor/github.com/onsi/gomega/internal/gomega.go b/vendor/github.com/onsi/gomega/internal/gomega.go index e75d2626..de1f4f33 100644 --- a/vendor/github.com/onsi/gomega/internal/gomega.go +++ 
b/vendor/github.com/onsi/gomega/internal/gomega.go @@ -2,7 +2,6 @@ package internal import ( "context" - "fmt" "time" "github.com/onsi/gomega/types" @@ -53,42 +52,38 @@ func (g *Gomega) ExpectWithOffset(offset int, actual interface{}, extra ...inter return NewAssertion(actual, g, offset, extra...) } -func (g *Gomega) Eventually(args ...interface{}) types.AsyncAssertion { - return g.makeAsyncAssertion(AsyncAssertionTypeEventually, 0, args...) +func (g *Gomega) Eventually(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { + return g.makeAsyncAssertion(AsyncAssertionTypeEventually, 0, actualOrCtx, args...) } -func (g *Gomega) EventuallyWithOffset(offset int, args ...interface{}) types.AsyncAssertion { - return g.makeAsyncAssertion(AsyncAssertionTypeEventually, offset, args...) +func (g *Gomega) EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { + return g.makeAsyncAssertion(AsyncAssertionTypeEventually, offset, actualOrCtx, args...) } -func (g *Gomega) Consistently(args ...interface{}) types.AsyncAssertion { - return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, 0, args...) +func (g *Gomega) Consistently(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { + return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, 0, actualOrCtx, args...) } -func (g *Gomega) ConsistentlyWithOffset(offset int, args ...interface{}) types.AsyncAssertion { - return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, offset, args...) +func (g *Gomega) ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { + return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, offset, actualOrCtx, args...) } -func (g *Gomega) makeAsyncAssertion(asyncAssertionType AsyncAssertionType, offset int, args ...interface{}) types.AsyncAssertion { +func (g *Gomega) makeAsyncAssertion(asyncAssertionType AsyncAssertionType, offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { baseOffset := 3 timeoutInterval := -time.Duration(1) pollingInterval := -time.Duration(1) intervals := []interface{}{} var ctx context.Context - if len(args) == 0 { - g.Fail(fmt.Sprintf("Call to %s is missing a value or function to poll", asyncAssertionType), offset+baseOffset) - return nil - } - actual := args[0] - startingIndex := 1 - if _, isCtx := args[0].(context.Context); isCtx && len(args) > 1 { + actual := actualOrCtx + startingIndex := 0 + if _, isCtx := actualOrCtx.(context.Context); isCtx && len(args) > 0 { // the first argument is a context, we should accept it as the context _only if_ it is **not** the only argumnent **and** the second argument is not a parseable duration // this is due to an unfortunate ambiguity in early version of Gomega in which multi-type durations are allowed after the actual - if _, err := toDuration(args[1]); err != nil { - ctx = args[0].(context.Context) - actual = args[1] - startingIndex = 2 + if _, err := toDuration(args[0]); err != nil { + ctx = actualOrCtx.(context.Context) + actual = args[0] + startingIndex = 1 } } @@ -114,7 +109,7 @@ func (g *Gomega) makeAsyncAssertion(asyncAssertionType AsyncAssertionType, offse } } - return NewAsyncAssertion(asyncAssertionType, actual, g, timeoutInterval, pollingInterval, ctx, offset) + return NewAsyncAssertion(asyncAssertionType, actual, g, timeoutInterval, pollingInterval, 1, ctx, offset) } func (g *Gomega) SetDefaultEventuallyTimeout(t time.Duration) { diff --git a/vendor/github.com/onsi/gomega/matchers.go 
b/vendor/github.com/onsi/gomega/matchers.go
index f9d9f2aa..44056ad6 100644
--- a/vendor/github.com/onsi/gomega/matchers.go
+++ b/vendor/github.com/onsi/gomega/matchers.go
@@ -27,7 +27,8 @@ func BeEquivalentTo(expected interface{}) types.GomegaMatcher {
 	}
 }

-// BeComparableTo uses gocmp.Equal to compare. You can pass cmp.Option as options.
+// BeComparableTo uses gocmp.Equal from github.com/google/go-cmp (instead of reflect.DeepEqual) to perform a deep comparison.
+// You can pass cmp.Option as options.
 // It is an error for actual and expected to be nil. Use BeNil() instead.
 func BeComparableTo(expected interface{}, opts ...cmp.Option) types.GomegaMatcher {
 	return &matchers.BeComparableToMatcher{
@@ -348,6 +349,20 @@ func ConsistOf(elements ...interface{}) types.GomegaMatcher {
 	}
 }

+// HaveExactElements succeeds if actual contains elements that precisely match the elements passed into the matcher. The ordering of the elements does matter.
+// By default HaveExactElements() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
+//
+//	Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements("Foo", "FooBar"))
+//	Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements("Foo", ContainSubstring("Bar")))
+//	Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements(ContainSubstring("Foo"), ContainSubstring("Foo")))
+//
+// Actual must be an array or slice.
+func HaveExactElements(elements ...interface{}) types.GomegaMatcher {
+	return &matchers.HaveExactElementsMatcher{
+		Elements: elements,
+	}
+}
+
 // ContainElements succeeds if actual contains the passed in elements. The ordering of the elements does not matter.
 // By default ContainElements() uses Equal() to match the elements, however custom matchers can be passed in instead.
Here are some examples: // diff --git a/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go new file mode 100644 index 00000000..19d8f3d1 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go @@ -0,0 +1,75 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type mismatchFailure struct { + failure string + index int +} + +type HaveExactElementsMatcher struct { + Elements []interface{} + mismatchFailures []mismatchFailure + missingIndex int + extraIndex int +} + +func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool, err error) { + if isMap(actual) { + return false, fmt.Errorf("error") + } + + matchers := matchers(matcher.Elements) + values := valuesOf(actual) + + lenMatchers := len(matchers) + lenValues := len(values) + + for i := 0; i < lenMatchers || i < lenValues; i++ { + if i >= lenMatchers { + matcher.extraIndex = i + continue + } + + if i >= lenValues { + matcher.missingIndex = i + return + } + + elemMatcher := matchers[i].(omegaMatcher) + match, err := elemMatcher.Match(values[i]) + if err != nil || !match { + matcher.mismatchFailures = append(matcher.mismatchFailures, mismatchFailure{ + index: i, + failure: elemMatcher.FailureMessage(values[i]), + }) + } + } + + return matcher.missingIndex+matcher.extraIndex+len(matcher.mismatchFailures) == 0, nil +} + +func (matcher *HaveExactElementsMatcher) FailureMessage(actual interface{}) (message string) { + message = format.Message(actual, "to have exact elements with", presentable(matcher.Elements)) + if matcher.missingIndex > 0 { + message = fmt.Sprintf("%s\nthe missing elements start from index %d", message, matcher.missingIndex) + } + if matcher.extraIndex > 0 { + message = fmt.Sprintf("%s\nthe extra elements start from index %d", message, matcher.extraIndex) + } + if len(matcher.mismatchFailures) != 0 { + message = fmt.Sprintf("%s\nthe mismatch indexes were:", message) + } + for _, mismatch := range matcher.mismatchFailures { + message = fmt.Sprintf("%s\n%d: %s", message, mismatch.index, mismatch.failure) + } + return +} + +func (matcher *HaveExactElementsMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to contain elements", presentable(matcher.Elements)) +} diff --git a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go index c8993a86..827475ea 100644 --- a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go @@ -25,7 +25,17 @@ func (matcher *MatchErrorMatcher) Match(actual interface{}) (success bool, err e expected := matcher.Expected if isError(expected) { - return reflect.DeepEqual(actualErr, expected) || errors.Is(actualErr, expected.(error)), nil + // first try the built-in errors.Is + if errors.Is(actualErr, expected.(error)) { + return true, nil + } + // if not, try DeepEqual along the error chain + for unwrapped := actualErr; unwrapped != nil; unwrapped = errors.Unwrap(unwrapped) { + if reflect.DeepEqual(unwrapped, expected) { + return true, nil + } + } + return false, nil } if isString(expected) { diff --git a/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go index 721ed552..da5a3959 100644 --- a/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go +++ 
b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go
@@ -1,11 +1,16 @@
 package matchers

 import (
+	"errors"
 	"fmt"

 	"github.com/onsi/gomega/format"
 )

+type formattedGomegaError interface {
+	FormattedGomegaError() string
+}
+
 type SucceedMatcher struct {
 }

@@ -25,6 +30,10 @@ func (matcher *SucceedMatcher) Match(actual interface{}) (success bool, err erro
 }

 func (matcher *SucceedMatcher) FailureMessage(actual interface{}) (message string) {
+	var fgErr formattedGomegaError
+	if errors.As(actual.(error), &fgErr) {
+		return fgErr.FormattedGomegaError()
+	}
 	return fmt.Sprintf("Expected success, but got an error:\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1))
 }
diff --git a/vendor/github.com/onsi/gomega/types/types.go b/vendor/github.com/onsi/gomega/types/types.go
index 089505a4..7c7adb94 100644
--- a/vendor/github.com/onsi/gomega/types/types.go
+++ b/vendor/github.com/onsi/gomega/types/types.go
@@ -19,11 +19,11 @@ type Gomega interface {
 	Expect(actual interface{}, extra ...interface{}) Assertion
 	ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion

-	Eventually(args ...interface{}) AsyncAssertion
-	EventuallyWithOffset(offset int, args ...interface{}) AsyncAssertion
+	Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion
+	EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion

-	Consistently(args ...interface{}) AsyncAssertion
-	ConsistentlyWithOffset(offset int, args ...interface{}) AsyncAssertion
+	Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion
+	ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion

 	SetDefaultEventuallyTimeout(time.Duration)
 	SetDefaultEventuallyPollingInterval(time.Duration)
@@ -75,6 +75,7 @@ type AsyncAssertion interface {
 	ProbeEvery(interval time.Duration) AsyncAssertion
 	WithContext(ctx context.Context) AsyncAssertion
 	WithArguments(argsToForward ...interface{}) AsyncAssertion
+	MustPassRepeatedly(count int) AsyncAssertion
 }

 // Assertions are returned by Ω and Expect and enable assertions against Gomega matchers
diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md
index 3ba05276..d2c8aada 100644
--- a/vendor/go.uber.org/multierr/CHANGELOG.md
+++ b/vendor/go.uber.org/multierr/CHANGELOG.md
@@ -1,6 +1,14 @@
 Releases
 ========

+v1.9.0 (2022-12-12)
+===================
+
+- Add `AppendFunc` that allows passing functions, similar to
+  `AppendInvoke`.
+
+- Bump up yaml.v3 dependency to 3.0.1.
+
 v1.8.0 (2022-02-28)
 ===================
diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go
index f45af149..cdd91ae5 100644
--- a/vendor/go.uber.org/multierr/error.go
+++ b/vendor/go.uber.org/multierr/error.go
@@ -20,106 +20,109 @@
 // Package multierr allows combining one or more errors together.
 //
-// Overview
+// # Overview
 //
 // Errors can be combined with the use of the Combine function.
 //
-//     multierr.Combine(
-//         reader.Close(),
-//         writer.Close(),
-//         conn.Close(),
-//     )
+//	multierr.Combine(
+//		reader.Close(),
+//		writer.Close(),
+//		conn.Close(),
+//	)
 //
 // If only two errors are being combined, the Append function may be used
 // instead.
 //
-//     err = multierr.Append(reader.Close(), writer.Close())
+//	err = multierr.Append(reader.Close(), writer.Close())
 //
 // The underlying list of errors for a returned error object may be retrieved
 // with the Errors function.
// -// errors := multierr.Errors(err) -// if len(errors) > 0 { -// fmt.Println("The following errors occurred:", errors) -// } +// errors := multierr.Errors(err) +// if len(errors) > 0 { +// fmt.Println("The following errors occurred:", errors) +// } // -// Appending from a loop +// # Appending from a loop // // You sometimes need to append into an error from a loop. // -// var err error -// for _, item := range items { -// err = multierr.Append(err, process(item)) -// } +// var err error +// for _, item := range items { +// err = multierr.Append(err, process(item)) +// } // // Cases like this may require knowledge of whether an individual instance // failed. This usually requires introduction of a new variable. // -// var err error -// for _, item := range items { -// if perr := process(item); perr != nil { -// log.Warn("skipping item", item) -// err = multierr.Append(err, perr) -// } -// } +// var err error +// for _, item := range items { +// if perr := process(item); perr != nil { +// log.Warn("skipping item", item) +// err = multierr.Append(err, perr) +// } +// } // // multierr includes AppendInto to simplify cases like this. // -// var err error -// for _, item := range items { -// if multierr.AppendInto(&err, process(item)) { -// log.Warn("skipping item", item) -// } -// } +// var err error +// for _, item := range items { +// if multierr.AppendInto(&err, process(item)) { +// log.Warn("skipping item", item) +// } +// } // // This will append the error into the err variable, and return true if that // individual error was non-nil. // -// See AppendInto for more information. +// See [AppendInto] for more information. // -// Deferred Functions +// # Deferred Functions // // Go makes it possible to modify the return value of a function in a defer // block if the function was using named returns. This makes it possible to // record resource cleanup failures from deferred blocks. // -// func sendRequest(req Request) (err error) { -// conn, err := openConnection() -// if err != nil { -// return err -// } -// defer func() { -// err = multierr.Append(err, conn.Close()) -// }() -// // ... -// } +// func sendRequest(req Request) (err error) { +// conn, err := openConnection() +// if err != nil { +// return err +// } +// defer func() { +// err = multierr.Append(err, conn.Close()) +// }() +// // ... +// } // // multierr provides the Invoker type and AppendInvoke function to make cases // like the above simpler and obviate the need for a closure. The following is // roughly equivalent to the example above. // -// func sendRequest(req Request) (err error) { -// conn, err := openConnection() -// if err != nil { -// return err -// } -// defer multierr.AppendInvoke(&err, multierr.Close(conn)) -// // ... -// } +// func sendRequest(req Request) (err error) { +// conn, err := openConnection() +// if err != nil { +// return err +// } +// defer multierr.AppendInvoke(&err, multierr.Close(conn)) +// // ... +// } // -// See AppendInvoke and Invoker for more information. +// See [AppendInvoke] and [Invoker] for more information. // -// Advanced Usage +// NOTE: If you're modifying an error from inside a defer, you MUST use a named +// return value for that function. +// +// # Advanced Usage // // Errors returned by Combine and Append MAY implement the following // interface. // -// type errorGroup interface { -// // Returns a slice containing the underlying list of errors. -// // -// // This slice MUST NOT be modified by the caller. 
-// Errors() []error -// } +// type errorGroup interface { +// // Returns a slice containing the underlying list of errors. +// // +// // This slice MUST NOT be modified by the caller. +// Errors() []error +// } // // Note that if you need access to list of errors behind a multierr error, you // should prefer using the Errors function. That said, if you need cheap @@ -128,13 +131,13 @@ // because errors returned by Combine and Append are not guaranteed to // implement this interface. // -// var errors []error -// group, ok := err.(errorGroup) -// if ok { -// errors = group.Errors() -// } else { -// errors = []error{err} -// } +// var errors []error +// group, ok := err.(errorGroup) +// if ok { +// errors = group.Errors() +// } else { +// errors = []error{err} +// } package multierr // import "go.uber.org/multierr" import ( @@ -185,8 +188,8 @@ type errorGroup interface { // Errors returns a slice containing zero or more errors that the supplied // error is composed of. If the error is nil, a nil slice is returned. // -// err := multierr.Append(r.Close(), w.Close()) -// errors := multierr.Errors(err) +// err := multierr.Append(r.Close(), w.Close()) +// errors := multierr.Errors(err) // // If the error is not composed of other errors, the returned slice contains // just the error that was passed in. @@ -209,10 +212,7 @@ func Errors(err error) []error { return []error{err} } - errors := eg.Errors() - result := make([]error, len(errors)) - copy(result, errors) - return result + return append(([]error)(nil), eg.Errors()...) } // multiError is an error that holds one or more errors. @@ -393,8 +393,7 @@ func fromSlice(errors []error) error { // Otherwise "errors" escapes to the heap // unconditionally for all other cases. // This lets us optimize for the "no errors" case. - out := make([]error, len(errors)) - copy(out, errors) + out := append(([]error)(nil), errors...) return &multiError{errors: out} } } @@ -420,32 +419,32 @@ func fromSlice(errors []error) error { // If zero arguments were passed or if all items are nil, a nil error is // returned. // -// Combine(nil, nil) // == nil +// Combine(nil, nil) // == nil // // If only a single error was passed, it is returned as-is. // -// Combine(err) // == err +// Combine(err) // == err // // Combine skips over nil arguments so this function may be used to combine // together errors from operations that fail independently of each other. // -// multierr.Combine( -// reader.Close(), -// writer.Close(), -// pipe.Close(), -// ) +// multierr.Combine( +// reader.Close(), +// writer.Close(), +// pipe.Close(), +// ) // // If any of the passed errors is a multierr error, it will be flattened along // with the other errors. // -// multierr.Combine(multierr.Combine(err1, err2), err3) -// // is the same as -// multierr.Combine(err1, err2, err3) +// multierr.Combine(multierr.Combine(err1, err2), err3) +// // is the same as +// multierr.Combine(err1, err2, err3) // // The returned error formats into a readable multi-line error message if // formatted with %+v. // -// fmt.Sprintf("%+v", multierr.Combine(err1, err2)) +// fmt.Sprintf("%+v", multierr.Combine(err1, err2)) func Combine(errors ...error) error { return fromSlice(errors) } @@ -455,16 +454,19 @@ func Combine(errors ...error) error { // This function is a specialization of Combine for the common case where // there are only two errors. 
// -// err = multierr.Append(reader.Close(), writer.Close()) +// err = multierr.Append(reader.Close(), writer.Close()) // // The following pattern may also be used to record failure of deferred // operations without losing information about the original error. // -// func doSomething(..) (err error) { -// f := acquireResource() -// defer func() { -// err = multierr.Append(err, f.Close()) -// }() +// func doSomething(..) (err error) { +// f := acquireResource() +// defer func() { +// err = multierr.Append(err, f.Close()) +// }() +// +// Note that the variable MUST be a named return to append an error to it from +// the defer statement. See also [AppendInvoke]. func Append(left error, right error) error { switch { case left == nil: @@ -494,37 +496,37 @@ func Append(left error, right error) error { // AppendInto appends an error into the destination of an error pointer and // returns whether the error being appended was non-nil. // -// var err error -// multierr.AppendInto(&err, r.Close()) -// multierr.AppendInto(&err, w.Close()) +// var err error +// multierr.AppendInto(&err, r.Close()) +// multierr.AppendInto(&err, w.Close()) // // The above is equivalent to, // -// err := multierr.Append(r.Close(), w.Close()) +// err := multierr.Append(r.Close(), w.Close()) // // As AppendInto reports whether the provided error was non-nil, it may be // used to build a multierr error in a loop more ergonomically. For example: // -// var err error -// for line := range lines { -// var item Item -// if multierr.AppendInto(&err, parse(line, &item)) { -// continue -// } -// items = append(items, item) -// } +// var err error +// for line := range lines { +// var item Item +// if multierr.AppendInto(&err, parse(line, &item)) { +// continue +// } +// items = append(items, item) +// } // // Compare this with a version that relies solely on Append: // -// var err error -// for line := range lines { -// var item Item -// if parseErr := parse(line, &item); parseErr != nil { -// err = multierr.Append(err, parseErr) -// continue -// } -// items = append(items, item) -// } +// var err error +// for line := range lines { +// var item Item +// if parseErr := parse(line, &item); parseErr != nil { +// err = multierr.Append(err, parseErr) +// continue +// } +// items = append(items, item) +// } func AppendInto(into *error, err error) (errored bool) { if into == nil { // We panic if 'into' is nil. This is not documented above @@ -545,7 +547,7 @@ func AppendInto(into *error, err error) (errored bool) { // AppendInvoke to append the result of calling the function into an error. // This allows you to conveniently defer capture of failing operations. // -// See also, Close and Invoke. +// See also, [Close] and [Invoke]. type Invoker interface { Invoke() error } @@ -556,19 +558,22 @@ type Invoker interface { // // For example, // -// func processReader(r io.Reader) (err error) { -// scanner := bufio.NewScanner(r) -// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) -// for scanner.Scan() { -// // ... -// } -// // ... -// } +// func processReader(r io.Reader) (err error) { +// scanner := bufio.NewScanner(r) +// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) +// for scanner.Scan() { +// // ... +// } +// // ... +// } // // In this example, the following line will construct the Invoker right away, // but defer the invocation of scanner.Err() until the function returns. 
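The "MUST be a named return" notes added in this hunk are the detail most often gotten wrong in practice: with an unnamed return value, a deferred Append mutates a local that has already been copied out, and the cleanup error is silently lost. A small sketch of the correct shape (the failing cleanup function is hypothetical):

```go
package main

import (
	"errors"
	"fmt"

	"go.uber.org/multierr"
)

// run uses a named return, so the deferred AppendInvoke can still change
// the function's result after the body has returned.
func run() (err error) {
	cleanup := func() error { return errors.New("cleanup failed") } // hypothetical
	defer multierr.AppendInvoke(&err, multierr.Invoke(cleanup))
	return errors.New("main operation failed")
}

func main() {
	// Both the operation error and the cleanup error survive.
	fmt.Printf("%+v\n", run())
}
```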
// -// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) +// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) +// +// Note that the error you're appending to from the defer statement MUST be a +// named return. type Invoke func() error // Invoke calls the supplied function and returns its result. @@ -579,19 +584,22 @@ func (i Invoke) Invoke() error { return i() } // // For example, // -// func processFile(path string) (err error) { -// f, err := os.Open(path) -// if err != nil { -// return err -// } -// defer multierr.AppendInvoke(&err, multierr.Close(f)) -// return processReader(f) -// } +// func processFile(path string) (err error) { +// f, err := os.Open(path) +// if err != nil { +// return err +// } +// defer multierr.AppendInvoke(&err, multierr.Close(f)) +// return processReader(f) +// } // // In this example, multierr.Close will construct the Invoker right away, but // defer the invocation of f.Close until the function returns. // -// defer multierr.AppendInvoke(&err, multierr.Close(f)) +// defer multierr.AppendInvoke(&err, multierr.Close(f)) +// +// Note that the error you're appending to from the defer statement MUST be a +// named return. func Close(closer io.Closer) Invoker { return Invoke(closer.Close) } @@ -601,52 +609,73 @@ func Close(closer io.Closer) Invoker { // invocation of fallible operations until a function returns, and capture the // resulting errors. // -// func doSomething(...) (err error) { -// // ... -// f, err := openFile(..) -// if err != nil { -// return err -// } +// func doSomething(...) (err error) { +// // ... +// f, err := openFile(..) +// if err != nil { +// return err +// } +// +// // multierr will call f.Close() when this function returns and +// // if the operation fails, its append its error into the +// // returned error. +// defer multierr.AppendInvoke(&err, multierr.Close(f)) // -// // multierr will call f.Close() when this function returns and -// // if the operation fails, its append its error into the -// // returned error. -// defer multierr.AppendInvoke(&err, multierr.Close(f)) +// scanner := bufio.NewScanner(f) +// // Similarly, this scheduled scanner.Err to be called and +// // inspected when the function returns and append its error +// // into the returned error. +// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) // -// scanner := bufio.NewScanner(f) -// // Similarly, this scheduled scanner.Err to be called and -// // inspected when the function returns and append its error -// // into the returned error. -// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) +// // ... +// } // -// // ... -// } +// NOTE: If used with a defer, the error variable MUST be a named return. // // Without defer, AppendInvoke behaves exactly like AppendInto. // -// err := // ... -// multierr.AppendInvoke(&err, mutltierr.Invoke(foo)) +// err := // ... +// multierr.AppendInvoke(&err, mutltierr.Invoke(foo)) // -// // ...is roughly equivalent to... +// // ...is roughly equivalent to... // -// err := // ... -// multierr.AppendInto(&err, foo()) +// err := // ... +// multierr.AppendInto(&err, foo()) // // The advantage of the indirection introduced by Invoker is to make it easy // to defer the invocation of a function. Without this indirection, the // invoked function will be evaluated at the time of the defer block rather // than when the function returns. // -// // BAD: This is likely not what the caller intended. 
This will evaluate
-// // foo() right away and append its result into the error when the
-// // function returns.
-// defer multierr.AppendInto(&err, foo())
+//	// BAD: This is likely not what the caller intended. This will evaluate
+//	// foo() right away and append its result into the error when the
+//	// function returns.
+//	defer multierr.AppendInto(&err, foo())
 //
-// // GOOD: This will defer invocation of foo unutil the function returns.
-// defer multierr.AppendInvoke(&err, multierr.Invoke(foo))
+//	// GOOD: This will defer invocation of foo unutil the function returns.
+//	defer multierr.AppendInvoke(&err, multierr.Invoke(foo))
 //
 // multierr provides a few Invoker implementations out of the box for
-// convenience. See Invoker for more information.
+// convenience. See [Invoker] for more information.
 func AppendInvoke(into *error, invoker Invoker) {
 	AppendInto(into, invoker.Invoke())
 }
+
+// AppendFunc is a shorthand for [AppendInvoke].
+// It allows using function or method value directly
+// without having to wrap it into an [Invoker] interface.
+//
+//	func doSomething(...) (err error) {
+//		w, err := startWorker(...)
+//		if err != nil {
+//			return err
+//		}
+//
+//		// multierr will call w.Stop() when this function returns and
+//		// if the operation fails, it appends its error into the
+//		// returned error.
+//		defer multierr.AppendFunc(&err, w.Stop)
+//	}
+func AppendFunc(into *error, fn func() error) {
+	AppendInvoke(into, Invoke(fn))
+}
diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md
index da36c370..0db1f9f1 100644
--- a/vendor/go.uber.org/zap/CHANGELOG.md
+++ b/vendor/go.uber.org/zap/CHANGELOG.md
@@ -3,6 +3,19 @@ All notable changes to this project will be documented in this file.
 This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
 
+## 1.24.0 (30 Nov 2022)
+
+Enhancements:
+* [#1148][]: Add `Level` to both `Logger` and `SugaredLogger` that reports the
+  current minimum enabled log level.
+* [#1185][]: `SugaredLogger` turns errors to zap.Error automatically.
+
+Thanks to @Abirdcfly, @craigpastro, @nnnkkk7, and @sashamelentyev for their
+contributions to this release.
+
+[#1148]: https://github.com/uber-go/zap/pull/1148
+[#1185]: https://github.com/uber-go/zap/pull/1185
+
 ## 1.23.0 (24 Aug 2022)
 
 Enhancements:
diff --git a/vendor/go.uber.org/zap/array_go118.go b/vendor/go.uber.org/zap/array_go118.go
index ea94f9d7..d0d2c49d 100644
--- a/vendor/go.uber.org/zap/array_go118.go
+++ b/vendor/go.uber.org/zap/array_go118.go
@@ -76,9 +76,9 @@ func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
 	return nil
 }
 
-// objectMarshalerPtr is a constraint that specifies that the given type
+// ObjectMarshalerPtr is a constraint that specifies that the given type
 // implements zapcore.ObjectMarshaler on a pointer receiver.
-type objectMarshalerPtr[T any] interface {
+type ObjectMarshalerPtr[T any] interface {
 	*T
 	zapcore.ObjectMarshaler
 }
@@ -105,11 +105,11 @@ type objectMarshalerPtr[T any] interface {
 //
 //	var requests []*Request = ...
// logger.Info("sending requests", zap.Objects("requests", requests)) -func ObjectValues[T any, P objectMarshalerPtr[T]](key string, values []T) Field { +func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field { return Array(key, objectValues[T, P](values)) } -type objectValues[T any, P objectMarshalerPtr[T]] []T +type objectValues[T any, P ObjectMarshalerPtr[T]] []T func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error { for i := range os { diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go index b5f9a99f..cd44030d 100644 --- a/vendor/go.uber.org/zap/logger.go +++ b/vendor/go.uber.org/zap/logger.go @@ -183,6 +183,13 @@ func (log *Logger) With(fields ...Field) *Logger { return l } +// Level reports the minimum enabled level for this logger. +// +// For NopLoggers, this is [zapcore.InvalidLevel]. +func (log *Logger) Level() zapcore.Level { + return zapcore.LevelOf(log.core) +} + // Check returns a CheckedEntry if logging a message at the specified level // is enabled. It's a completely optional optimization; in high-performance // applications, Check can help avoid allocating a slice to hold fields. diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go index 1511166c..c4f3bca3 100644 --- a/vendor/go.uber.org/zap/options.go +++ b/vendor/go.uber.org/zap/options.go @@ -133,7 +133,8 @@ func IncreaseLevel(lvl zapcore.LevelEnabler) Option { } // OnFatal sets the action to take on fatal logs. -// Deprecated: Use WithFatalHook instead. +// +// Deprecated: Use [WithFatalHook] instead. func OnFatal(action zapcore.CheckWriteAction) Option { return WithFatalHook(action) } diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go index df46fa87..478c9a10 100644 --- a/vendor/go.uber.org/zap/sink.go +++ b/vendor/go.uber.org/zap/sink.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2016-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -26,6 +26,7 @@ import ( "io" "net/url" "os" + "path/filepath" "strings" "sync" @@ -34,23 +35,7 @@ import ( const schemeFile = "file" -var ( - _sinkMutex sync.RWMutex - _sinkFactories map[string]func(*url.URL) (Sink, error) // keyed by scheme -) - -func init() { - resetSinkRegistry() -} - -func resetSinkRegistry() { - _sinkMutex.Lock() - defer _sinkMutex.Unlock() - - _sinkFactories = map[string]func(*url.URL) (Sink, error){ - schemeFile: newFileSink, - } -} +var _sinkRegistry = newSinkRegistry() // Sink defines the interface to write to and close logger destinations. type Sink interface { @@ -58,10 +43,6 @@ type Sink interface { io.Closer } -type nopCloserSink struct{ zapcore.WriteSyncer } - -func (nopCloserSink) Close() error { return nil } - type errSinkNotFound struct { scheme string } @@ -70,16 +51,29 @@ func (e *errSinkNotFound) Error() string { return fmt.Sprintf("no sink found for scheme %q", e.scheme) } -// RegisterSink registers a user-supplied factory for all sinks with a -// particular scheme. -// -// All schemes must be ASCII, valid under section 3.1 of RFC 3986 -// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already -// have a factory registered. Zap automatically registers a factory for the -// "file" scheme. 
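The sink.go hunks here (continuing below) replace zap's package-level registry (_sinkMutex/_sinkFactories) with a sinkRegistry type, while keeping the exported RegisterSink entry point intact. For orientation, a hedged sketch of how calling code routes log output through a custom scheme via that unchanged API (the stderrSink type is hypothetical):

```go
package main

import (
	"net/url"
	"os"

	"go.uber.org/zap"
)

// stderrSink adapts os.Stderr to zap.Sink; Write and Sync are promoted
// from *os.File, and Close is a no-op so we never close the real stderr.
type stderrSink struct{ *os.File }

func (stderrSink) Close() error { return nil }

func main() {
	// Registration goes through the same exported function before and
	// after the registry refactor.
	if err := zap.RegisterSink("demo", func(*url.URL) (zap.Sink, error) {
		return stderrSink{os.Stderr}, nil
	}); err != nil {
		panic(err)
	}

	cfg := zap.NewProductionConfig()
	cfg.OutputPaths = []string{"demo://"}
	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	logger.Info("routed through the custom sink")
}
```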
-func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
-	_sinkMutex.Lock()
-	defer _sinkMutex.Unlock()
+type nopCloserSink struct{ zapcore.WriteSyncer }
+
+func (nopCloserSink) Close() error { return nil }
+
+type sinkRegistry struct {
+	mu        sync.Mutex
+	factories map[string]func(*url.URL) (Sink, error)          // keyed by scheme
+	openFile  func(string, int, os.FileMode) (*os.File, error) // type matches os.OpenFile
+}
+
+func newSinkRegistry() *sinkRegistry {
+	sr := &sinkRegistry{
+		factories: make(map[string]func(*url.URL) (Sink, error)),
+		openFile:  os.OpenFile,
+	}
+	sr.RegisterSink(schemeFile, sr.newFileSinkFromURL)
+	return sr
+}
+
+// RegisterScheme registers the given factory for the specific scheme.
+func (sr *sinkRegistry) RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
+	sr.mu.Lock()
+	defer sr.mu.Unlock()
 
 	if scheme == "" {
 		return errors.New("can't register a sink factory for empty string")
@@ -88,14 +82,22 @@ func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
 	if err != nil {
 		return fmt.Errorf("%q is not a valid scheme: %v", scheme, err)
 	}
-	if _, ok := _sinkFactories[normalized]; ok {
+	if _, ok := sr.factories[normalized]; ok {
 		return fmt.Errorf("sink factory already registered for scheme %q", normalized)
 	}
-	_sinkFactories[normalized] = factory
+	sr.factories[normalized] = factory
 	return nil
 }
 
-func newSink(rawURL string) (Sink, error) {
+func (sr *sinkRegistry) newSink(rawURL string) (Sink, error) {
+	// URL parsing doesn't work well for Windows paths such as `c:\log.txt`, as scheme is set to
+	// the drive, and path is unset unless `c:/log.txt` is used.
+	// To avoid Windows-specific URL handling, we instead check IsAbs to open as a file.
+	// filepath.IsAbs is OS-specific, so IsAbs('c:/log.txt') is false outside of Windows.
+	if filepath.IsAbs(rawURL) {
+		return sr.newFileSinkFromPath(rawURL)
+	}
+
 	u, err := url.Parse(rawURL)
 	if err != nil {
 		return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err)
@@ -104,16 +106,27 @@ func newSink(rawURL string) (Sink, error) {
 		u.Scheme = schemeFile
 	}
 
-	_sinkMutex.RLock()
-	factory, ok := _sinkFactories[u.Scheme]
-	_sinkMutex.RUnlock()
+	sr.mu.Lock()
+	factory, ok := sr.factories[u.Scheme]
+	sr.mu.Unlock()
 	if !ok {
 		return nil, &errSinkNotFound{u.Scheme}
 	}
 	return factory(u)
 }
 
-func newFileSink(u *url.URL) (Sink, error) {
+// RegisterSink registers a user-supplied factory for all sinks with a
+// particular scheme.
+//
+// All schemes must be ASCII, valid under section 3.1 of RFC 3986
+// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already
+// have a factory registered. Zap automatically registers a factory for the
+// "file" scheme.
+func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { + return _sinkRegistry.RegisterSink(scheme, factory) +} + +func (sr *sinkRegistry) newFileSinkFromURL(u *url.URL) (Sink, error) { if u.User != nil { return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u) } @@ -130,13 +143,18 @@ func newFileSink(u *url.URL) (Sink, error) { if hn := u.Hostname(); hn != "" && hn != "localhost" { return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u) } - switch u.Path { + + return sr.newFileSinkFromPath(u.Path) +} + +func (sr *sinkRegistry) newFileSinkFromPath(path string) (Sink, error) { + switch path { case "stdout": return nopCloserSink{os.Stdout}, nil case "stderr": return nopCloserSink{os.Stderr}, nil } - return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666) + return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666) } func normalizeScheme(s string) (string, error) { diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/stacktrace.go index 3d187fa5..817a3bde 100644 --- a/vendor/go.uber.org/zap/stacktrace.go +++ b/vendor/go.uber.org/zap/stacktrace.go @@ -154,7 +154,7 @@ func newStackFormatter(b *buffer.Buffer) stackFormatter { // the final runtime.main/runtime.goexit frame. func (sf *stackFormatter) FormatStack(stack *stacktrace) { // Note: On the last iteration, frames.Next() returns false, with a valid - // frame, but we ignore this frame. The last frame is a a runtime frame which + // frame, but we ignore this frame. The last frame is a runtime frame which // adds noise, since it's only either runtime.main or runtime.goexit. for frame, more := stack.Next(); more; frame, more = stack.Next() { sf.FormatFrame(frame) diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go index c450b2dd..ac387b3e 100644 --- a/vendor/go.uber.org/zap/sugar.go +++ b/vendor/go.uber.org/zap/sugar.go @@ -31,6 +31,7 @@ import ( const ( _oddNumberErrMsg = "Ignored key without a value." _nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys." + _multipleErrMsg = "Multiple errors without a key." ) // A SugaredLogger wraps the base Logger functionality in a slower, but less @@ -114,6 +115,13 @@ func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger { return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)} } +// Level reports the minimum enabled level for this logger. +// +// For NopLoggers, this is [zapcore.InvalidLevel]. +func (s *SugaredLogger) Level() zapcore.Level { + return zapcore.LevelOf(s.base.core) +} + // Debug uses fmt.Sprint to construct and log a message. func (s *SugaredLogger) Debug(args ...interface{}) { s.log(DebugLevel, "", args, nil) @@ -329,10 +337,13 @@ func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { return nil } - // Allocate enough space for the worst case; if users pass only structured - // fields, we shouldn't penalize them with extra allocations. - fields := make([]Field, 0, len(args)) - var invalid invalidPairs + var ( + // Allocate enough space for the worst case; if users pass only structured + // fields, we shouldn't penalize them with extra allocations. + fields = make([]Field, 0, len(args)) + invalid invalidPairs + seenError bool + ) for i := 0; i < len(args); { // This is a strongly-typed field. Consume it and move on. @@ -342,6 +353,18 @@ func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { continue } + // If it is an error, consume it and move on. 
+ if err, ok := args[i].(error); ok { + if !seenError { + seenError = true + fields = append(fields, Error(err)) + } else { + s.base.Error(_multipleErrMsg, Error(err)) + } + i++ + continue + } + // Make sure this element isn't a dangling key. if i == len(args)-1 { s.base.Error(_oddNumberErrMsg, Any("ignored", args[i])) diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go index 2c3dbd42..f08728e1 100644 --- a/vendor/go.uber.org/zap/writer.go +++ b/vendor/go.uber.org/zap/writer.go @@ -68,7 +68,7 @@ func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { var openErr error for _, path := range paths { - sink, err := newSink(path) + sink, err := _sinkRegistry.newSink(path) if err != nil { openErr = multierr.Append(openErr, fmt.Errorf("open sink %q: %w", path, err)) continue diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go index ea0431eb..9d326e95 100644 --- a/vendor/go.uber.org/zap/zapcore/entry.go +++ b/vendor/go.uber.org/zap/zapcore/entry.go @@ -281,7 +281,8 @@ func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry { // Should sets this CheckedEntry's CheckWriteAction, which controls whether a // Core will panic or fatal after writing this log entry. Like AddCore, it's // safe to call on nil CheckedEntry references. -// Deprecated: Use After(ent Entry, after CheckWriteHook) instead. +// +// Deprecated: Use [CheckedEntry.After] instead. func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry { return ce.After(ent, should) } diff --git a/vendor/golang.org/x/crypto/pkcs12/crypto.go b/vendor/golang.org/x/crypto/pkcs12/crypto.go index 484ca51b..96f4a1a5 100644 --- a/vendor/golang.org/x/crypto/pkcs12/crypto.go +++ b/vendor/golang.org/x/crypto/pkcs12/crypto.go @@ -117,7 +117,7 @@ func pbDecrypt(info decryptable, password []byte) (decrypted []byte, err error) } ps := decrypted[len(decrypted)-psLen:] decrypted = decrypted[:len(decrypted)-psLen] - if bytes.Compare(ps, bytes.Repeat([]byte{byte(psLen)}, psLen)) != 0 { + if !bytes.Equal(ps, bytes.Repeat([]byte{byte(psLen)}, psLen)) { return nil, ErrDecryption } diff --git a/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go index 7499e3fb..05de9cc2 100644 --- a/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go +++ b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go @@ -14,6 +14,7 @@ package rc2 import ( "crypto/cipher" "encoding/binary" + "math/bits" ) // The rc2 block size in bytes @@ -80,10 +81,6 @@ func expandKey(key []byte, t1 int) [64]uint16 { return k } -func rotl16(x uint16, b uint) uint16 { - return (x >> (16 - b)) | (x << b) -} - func (c *rc2Cipher) Encrypt(dst, src []byte) { r0 := binary.LittleEndian.Uint16(src[0:]) @@ -96,22 +93,22 @@ func (c *rc2Cipher) Encrypt(dst, src []byte) { for j <= 16 { // mix r0 r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) - r0 = rotl16(r0, 1) + r0 = bits.RotateLeft16(r0, 1) j++ // mix r1 r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) - r1 = rotl16(r1, 2) + r1 = bits.RotateLeft16(r1, 2) j++ // mix r2 r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) - r2 = rotl16(r2, 3) + r2 = bits.RotateLeft16(r2, 3) j++ // mix r3 r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) - r3 = rotl16(r3, 5) + r3 = bits.RotateLeft16(r3, 5) j++ } @@ -124,22 +121,22 @@ func (c *rc2Cipher) Encrypt(dst, src []byte) { for j <= 40 { // mix r0 r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) - r0 = rotl16(r0, 1) + r0 = bits.RotateLeft16(r0, 1) j++ // mix r1 r1 = r1 + c.k[j] + (r0 & 
r3) + ((^r0) & r2) - r1 = rotl16(r1, 2) + r1 = bits.RotateLeft16(r1, 2) j++ // mix r2 r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) - r2 = rotl16(r2, 3) + r2 = bits.RotateLeft16(r2, 3) j++ // mix r3 r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) - r3 = rotl16(r3, 5) + r3 = bits.RotateLeft16(r3, 5) j++ } @@ -152,22 +149,22 @@ func (c *rc2Cipher) Encrypt(dst, src []byte) { for j <= 60 { // mix r0 r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) - r0 = rotl16(r0, 1) + r0 = bits.RotateLeft16(r0, 1) j++ // mix r1 r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) - r1 = rotl16(r1, 2) + r1 = bits.RotateLeft16(r1, 2) j++ // mix r2 r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) - r2 = rotl16(r2, 3) + r2 = bits.RotateLeft16(r2, 3) j++ // mix r3 r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) - r3 = rotl16(r3, 5) + r3 = bits.RotateLeft16(r3, 5) j++ } @@ -188,22 +185,22 @@ func (c *rc2Cipher) Decrypt(dst, src []byte) { for j >= 44 { // unmix r3 - r3 = rotl16(r3, 16-5) + r3 = bits.RotateLeft16(r3, 16-5) r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) j-- // unmix r2 - r2 = rotl16(r2, 16-3) + r2 = bits.RotateLeft16(r2, 16-3) r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) j-- // unmix r1 - r1 = rotl16(r1, 16-2) + r1 = bits.RotateLeft16(r1, 16-2) r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) j-- // unmix r0 - r0 = rotl16(r0, 16-1) + r0 = bits.RotateLeft16(r0, 16-1) r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) j-- } @@ -215,22 +212,22 @@ func (c *rc2Cipher) Decrypt(dst, src []byte) { for j >= 20 { // unmix r3 - r3 = rotl16(r3, 16-5) + r3 = bits.RotateLeft16(r3, 16-5) r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) j-- // unmix r2 - r2 = rotl16(r2, 16-3) + r2 = bits.RotateLeft16(r2, 16-3) r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) j-- // unmix r1 - r1 = rotl16(r1, 16-2) + r1 = bits.RotateLeft16(r1, 16-2) r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) j-- // unmix r0 - r0 = rotl16(r0, 16-1) + r0 = bits.RotateLeft16(r0, 16-1) r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) j-- @@ -243,22 +240,22 @@ func (c *rc2Cipher) Decrypt(dst, src []byte) { for j >= 0 { // unmix r3 - r3 = rotl16(r3, 16-5) + r3 = bits.RotateLeft16(r3, 16-5) r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) j-- // unmix r2 - r2 = rotl16(r2, 16-3) + r2 = bits.RotateLeft16(r2, 16-3) r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) j-- // unmix r1 - r1 = rotl16(r1, 16-2) + r1 = bits.RotateLeft16(r1, 16-2) r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) j-- // unmix r0 - r0 = rotl16(r0, 16-1) + r0 = bits.RotateLeft16(r0, 16-1) r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) j-- diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go index 4600c207..fc04d03e 100644 --- a/vendor/golang.org/x/crypto/ssh/certs.go +++ b/vendor/golang.org/x/crypto/ssh/certs.go @@ -251,7 +251,7 @@ type algorithmOpenSSHCertSigner struct { // private key is held by signer. It returns an error if the public key in cert // doesn't match the key used by signer. 
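The rc2 hunks above mechanically replace the package's hand-rolled rotl16 helper with math/bits.RotateLeft16 from the standard library. A quick equivalence check for the rotation amounts rc2 actually uses, with rotl16 reproduced from the removed code:

```go
package main

import (
	"fmt"
	"math/bits"
)

// rotl16 is the helper deleted by the patch above.
func rotl16(x uint16, b uint) uint16 {
	return (x >> (16 - b)) | (x << b)
}

func main() {
	// rc2 rotates by 1, 2, 3, and 5 when mixing, and by 16-n when unmixing.
	for _, b := range []uint{1, 2, 3, 5, 11, 13, 14, 15} {
		for x := uint32(0); x <= 0xffff; x++ {
			if rotl16(uint16(x), b) != bits.RotateLeft16(uint16(x), int(b)) {
				fmt.Printf("mismatch at x=%#04x b=%d\n", x, b)
				return
			}
		}
	}
	fmt.Println("rotl16 and bits.RotateLeft16 agree on all inputs tested")
}
```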
func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) { - if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 { + if !bytes.Equal(cert.Key.Marshal(), signer.PublicKey().Marshal()) { return nil, errors.New("ssh: signer and cert have different public key") } diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go index c3062cfd..87f48552 100644 --- a/vendor/golang.org/x/crypto/ssh/cipher.go +++ b/vendor/golang.org/x/crypto/ssh/cipher.go @@ -96,13 +96,13 @@ func streamCipherMode(skip int, createFunc func(key, iv []byte) (cipher.Stream, // are not supported and will not be negotiated, even if explicitly requested in // ClientConfig.Crypto.Ciphers. var cipherModes = map[string]*cipherMode{ - // Ciphers from RFC4344, which introduced many CTR-based ciphers. Algorithms + // Ciphers from RFC 4344, which introduced many CTR-based ciphers. Algorithms // are defined in the order specified in the RFC. "aes128-ctr": {16, aes.BlockSize, streamCipherMode(0, newAESCTR)}, "aes192-ctr": {24, aes.BlockSize, streamCipherMode(0, newAESCTR)}, "aes256-ctr": {32, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - // Ciphers from RFC4345, which introduces security-improved arcfour ciphers. + // Ciphers from RFC 4345, which introduces security-improved arcfour ciphers. // They are defined in the order specified in the RFC. "arcfour128": {16, 0, streamCipherMode(1536, newRC4)}, "arcfour256": {32, 0, streamCipherMode(1536, newRC4)}, @@ -110,7 +110,7 @@ var cipherModes = map[string]*cipherMode{ // Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol. // Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and // RC4) has problems with weak keys, and should be used with caution." - // RFC4345 introduces improved versions of Arcfour. + // RFC 4345 introduces improved versions of Arcfour. "arcfour": {16, 0, streamCipherMode(0, newRC4)}, // AEAD ciphers @@ -641,7 +641,7 @@ const chacha20Poly1305ID = "chacha20-poly1305@openssh.com" // // https://tools.ietf.org/html/draft-josefsson-ssh-chacha20-poly1305-openssh-00 // -// the methods here also implement padding, which RFC4253 Section 6 +// the methods here also implement padding, which RFC 4253 Section 6 // also requires of stream ciphers. type chacha20Poly1305Cipher struct { lengthKey [32]byte diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go index 2a47a61d..c7964275 100644 --- a/vendor/golang.org/x/crypto/ssh/common.go +++ b/vendor/golang.org/x/crypto/ssh/common.go @@ -10,6 +10,7 @@ import ( "fmt" "io" "math" + "strings" "sync" _ "crypto/sha1" @@ -118,6 +119,20 @@ func algorithmsForKeyFormat(keyFormat string) []string { } } +// supportedPubKeyAuthAlgos specifies the supported client public key +// authentication algorithms. Note that this doesn't include certificate types +// since those use the underlying algorithm. This list is sent to the client if +// it supports the server-sig-algs extension. Order is irrelevant. +var supportedPubKeyAuthAlgos = []string{ + KeyAlgoED25519, + KeyAlgoSKED25519, KeyAlgoSKECDSA256, + KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, + KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoRSA, + KeyAlgoDSA, +} + +var supportedPubKeyAuthAlgosList = strings.Join(supportedPubKeyAuthAlgos, ",") + // unexpectedMessageError results when the SSH message that we received didn't // match what we wanted. 
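The supportedPubKeyAuthAlgos list added to common.go above is what the server advertises to clients through the server-sig-algs extension (RFC 8308) after the first key exchange, and what the authentication loop later checks incoming algorithm names against. A sketch of the comma-joined payload; the literal names below are the strings the ssh.KeyAlgo* constants resolve to:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Mirrors supportedPubKeyAuthAlgos from the hunk above; the real
	// slice is unexported inside golang.org/x/crypto/ssh.
	algos := []string{
		"ssh-ed25519",
		"sk-ssh-ed25519@openssh.com", "sk-ecdsa-sha2-nistp256@openssh.com",
		"ecdsa-sha2-nistp256", "ecdsa-sha2-nistp384", "ecdsa-sha2-nistp521",
		"rsa-sha2-256", "rsa-sha2-512", "ssh-rsa",
		"ssh-dss",
	}

	// This comma-joined string is the payload of the server-sig-algs
	// extension carried in SSH_MSG_EXT_INFO.
	fmt.Println(strings.Join(algos, ","))
}
```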
func unexpectedMessageError(expected, got uint8) error { @@ -149,7 +164,7 @@ type directionAlgorithms struct { // rekeyBytes returns a rekeying intervals in bytes. func (a *directionAlgorithms) rekeyBytes() int64 { - // According to RFC4344 block ciphers should rekey after + // According to RFC 4344 block ciphers should rekey after // 2^(BLOCKSIZE/4) blocks. For all AES flavors BLOCKSIZE is // 128. switch a.Cipher { @@ -158,7 +173,7 @@ func (a *directionAlgorithms) rekeyBytes() int64 { } - // For others, stick with RFC4253 recommendation to rekey after 1 Gb of data. + // For others, stick with RFC 4253 recommendation to rekey after 1 Gb of data. return 1 << 30 } diff --git a/vendor/golang.org/x/crypto/ssh/connection.go b/vendor/golang.org/x/crypto/ssh/connection.go index fd6b0681..35661a52 100644 --- a/vendor/golang.org/x/crypto/ssh/connection.go +++ b/vendor/golang.org/x/crypto/ssh/connection.go @@ -52,7 +52,7 @@ type Conn interface { // SendRequest sends a global request, and returns the // reply. If wantReply is true, it returns the response status - // and payload. See also RFC4254, section 4. + // and payload. See also RFC 4254, section 4. SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) // OpenChannel tries to open an channel. If the request is diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go index 653dc4d2..07a1843e 100644 --- a/vendor/golang.org/x/crypto/ssh/handshake.go +++ b/vendor/golang.org/x/crypto/ssh/handshake.go @@ -58,11 +58,13 @@ type handshakeTransport struct { incoming chan []byte readError error - mu sync.Mutex - writeError error - sentInitPacket []byte - sentInitMsg *kexInitMsg - pendingPackets [][]byte // Used when a key exchange is in progress. + mu sync.Mutex + writeError error + sentInitPacket []byte + sentInitMsg *kexInitMsg + pendingPackets [][]byte // Used when a key exchange is in progress. + writePacketsLeft uint32 + writeBytesLeft int64 // If the read loop wants to schedule a kex, it pings this // channel, and the write loop will send out a kex @@ -71,7 +73,8 @@ type handshakeTransport struct { // If the other side requests or confirms a kex, its kexInit // packet is sent here for the write loop to find it. - startKex chan *pendingKex + startKex chan *pendingKex + kexLoopDone chan struct{} // closed (with writeError non-nil) when kexLoop exits // data for host key checking hostKeyCallback HostKeyCallback @@ -86,12 +89,10 @@ type handshakeTransport struct { // Algorithms agreed in the last key exchange. algorithms *algorithms + // Counters exclusively owned by readLoop. readPacketsLeft uint32 readBytesLeft int64 - writePacketsLeft uint32 - writeBytesLeft int64 - // The session ID or nil if first kex did not complete yet. sessionID []byte } @@ -108,7 +109,8 @@ func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, clientVersion: clientVersion, incoming: make(chan []byte, chanSize), requestKex: make(chan struct{}, 1), - startKex: make(chan *pendingKex, 1), + startKex: make(chan *pendingKex), + kexLoopDone: make(chan struct{}), config: config, } @@ -340,16 +342,17 @@ write: t.mu.Unlock() } + // Unblock reader. + t.conn.Close() + // drain startKex channel. We don't service t.requestKex // because nobody does blocking sends there. - go func() { - for init := range t.startKex { - init.done <- t.writeError - } - }() + for request := range t.startKex { + request.done <- t.getWriteError() + } - // Unblock reader. 
- t.conn.Close() + // Mark that the loop is done so that Close can return. + close(t.kexLoopDone) } // The protocol uses uint32 for packet counters, so we can't let them @@ -545,7 +548,16 @@ func (t *handshakeTransport) writePacket(p []byte) error { } func (t *handshakeTransport) Close() error { - return t.conn.Close() + // Close the connection. This should cause the readLoop goroutine to wake up + // and close t.startKex, which will shut down kexLoop if running. + err := t.conn.Close() + + // Wait for the kexLoop goroutine to complete. + // At that point we know that the readLoop goroutine is complete too, + // because kexLoop itself waits for readLoop to close the startKex channel. + <-t.kexLoopDone + + return err } func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { @@ -615,7 +627,8 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { return err } - if t.sessionID == nil { + firstKeyExchange := t.sessionID == nil + if firstKeyExchange { t.sessionID = result.H } result.SessionID = t.sessionID @@ -626,6 +639,24 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil { return err } + + // On the server side, after the first SSH_MSG_NEWKEYS, send a SSH_MSG_EXT_INFO + // message with the server-sig-algs extension if the client supports it. See + // RFC 8308, Sections 2.4 and 3.1. + if !isClient && firstKeyExchange && contains(clientInit.KexAlgos, "ext-info-c") { + extInfo := &extInfoMsg{ + NumExtensions: 1, + Payload: make([]byte, 0, 4+15+4+len(supportedPubKeyAuthAlgosList)), + } + extInfo.Payload = appendInt(extInfo.Payload, len("server-sig-algs")) + extInfo.Payload = append(extInfo.Payload, "server-sig-algs"...) + extInfo.Payload = appendInt(extInfo.Payload, len(supportedPubKeyAuthAlgosList)) + extInfo.Payload = append(extInfo.Payload, supportedPubKeyAuthAlgosList...) + if err := t.conn.writePacket(Marshal(extInfo)); err != nil { + return err + } + } + if packet, err := t.conn.readPacket(); err != nil { return err } else if packet[0] != msgNewKeys { diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go index 1c7de1a6..72969804 100644 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ b/vendor/golang.org/x/crypto/ssh/keys.go @@ -184,7 +184,7 @@ func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey return "", nil, nil, "", nil, io.EOF } -// ParseAuthorizedKeys parses a public key from an authorized_keys +// ParseAuthorizedKey parses a public key from an authorized_keys // file used in OpenSSH according to the sshd(8) manual page. func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) { for len(in) > 0 { diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go index 19bc67c4..922032d9 100644 --- a/vendor/golang.org/x/crypto/ssh/messages.go +++ b/vendor/golang.org/x/crypto/ssh/messages.go @@ -68,7 +68,7 @@ type kexInitMsg struct { // See RFC 4253, section 8. -// Diffie-Helman +// Diffie-Hellman const msgKexDHInit = 30 type kexDHInitMsg struct { diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index 70045bdf..9e387029 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -68,8 +68,16 @@ type ServerConfig struct { // NoClientAuth is true if clients are allowed to connect without // authenticating. 
+ // To determine NoClientAuth at runtime, set NoClientAuth to true + // and the optional NoClientAuthCallback to a non-nil value. NoClientAuth bool + // NoClientAuthCallback, if non-nil, is called when a user + // attempts to authenticate with auth method "none". + // NoClientAuth must also be set to true for this be used, or + // this func is unused. + NoClientAuthCallback func(ConnMetadata) (*Permissions, error) + // MaxAuthTries specifies the maximum number of authentication attempts // permitted per connection. If set to a negative number, the number of // attempts are unlimited. If set to zero, the number of attempts are limited @@ -283,15 +291,6 @@ func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) return perms, err } -func isAcceptableAlgo(algo string) bool { - switch algo { - case KeyAlgoRSA, KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoSKECDSA256, KeyAlgoED25519, KeyAlgoSKED25519, - CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01: - return true - } - return false -} - func checkSourceAddress(addr net.Addr, sourceAddrs string) error { if addr == nil { return errors.New("ssh: no address known for client, but source-address match required") @@ -455,7 +454,11 @@ userAuthLoop: switch userAuthReq.Method { case "none": if config.NoClientAuth { - authErr = nil + if config.NoClientAuthCallback != nil { + perms, authErr = config.NoClientAuthCallback(s) + } else { + authErr = nil + } } // allow initial attempt of 'none' without penalty @@ -502,7 +505,7 @@ userAuthLoop: return nil, parseError(msgUserAuthRequest) } algo := string(algoBytes) - if !isAcceptableAlgo(algo) { + if !contains(supportedPubKeyAuthAlgos, underlyingAlgo(algo)) { authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo) break } @@ -560,7 +563,7 @@ userAuthLoop: // algorithm name that corresponds to algo with // sig.Format. This is usually the same, but // for certs, the names differ. - if !isAcceptableAlgo(sig.Format) { + if !contains(supportedPubKeyAuthAlgos, sig.Format) { authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format) break } diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go index 291c9190..46a89eda 100644 --- a/vendor/golang.org/x/net/html/parse.go +++ b/vendor/golang.org/x/net/html/parse.go @@ -184,7 +184,7 @@ func (p *parser) clearStackToContext(s scope) { } } -// parseGenericRawTextElements implements the generic raw text element parsing +// parseGenericRawTextElement implements the generic raw text element parsing // algorithm defined in 12.2.6.2. 
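The ServerConfig additions above pair a static switch with a runtime hook: NoClientAuth must stay true for the "none" method to be offered at all, and NoClientAuthCallback then decides per connection. A hedged sketch of a config using it (the "healthcheck" policy is hypothetical):

```go
package main

import (
	"errors"
	"fmt"

	"golang.org/x/crypto/ssh"
)

func newServerConfig() *ssh.ServerConfig {
	return &ssh.ServerConfig{
		// NoClientAuth gates the "none" auth method; the callback below
		// is only consulted when it is true.
		NoClientAuth: true,
		NoClientAuthCallback: func(conn ssh.ConnMetadata) (*ssh.Permissions, error) {
			// Hypothetical policy: only a designated account may connect
			// without authenticating; everyone else falls back to the
			// regular auth methods.
			if conn.User() == "healthcheck" {
				return &ssh.Permissions{}, nil
			}
			return nil, errors.New("authentication required")
		},
	}
}

func main() {
	fmt.Printf("configured: %T\n", newServerConfig())
}
```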
 // https://html.spec.whatwg.org/multipage/parsing.html#parsing-elements-that-contain-only-text
 // TODO: Since both RAWTEXT and RCDATA states are treated as tokenizer's part
diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go
index be3c7541..50f7c6aa 100644
--- a/vendor/golang.org/x/net/html/token.go
+++ b/vendor/golang.org/x/net/html/token.go
@@ -598,6 +598,11 @@ scriptDataDoubleEscapeEnd:
 // readComment reads the next comment token starting with "<!--". The opening
 // "<!--" has already been consumed.
 			z.data.end = z.raw.end - len("-->")
 			return
 		}
@@ -628,17 +632,50 @@ func (z *Tokenizer) readComment() {
 		if dashCount >= 2 {
 			c = z.readByte()
 			if z.err != nil {
-				z.data.end = z.raw.end
+				z.data.end = z.calculateAbruptCommentDataEnd()
 				return
-			}
-			if c == '>' {
+			} else if c == '>' {
 				z.data.end = z.raw.end - len("--!>")
 				return
+			} else if c == '-' {
+				dashCount = 1
+				beginning = false
+				continue
 			}
 		}
 		}
 		dashCount = 0
+		beginning = false
 	}
 }
+
+func (z *Tokenizer) calculateAbruptCommentDataEnd() int {
+	raw := z.Raw()
+	const prefixLen = len("<!--")
+	if len(raw) >= prefixLen {
+		raw = raw[prefixLen:]
+		if hasSuffix(raw, "--!") {
+			return z.raw.end - 3
+		} else if hasSuffix(raw, "-") {
+			return z.raw.end - 1
+		}
+	}
+	return z.raw.end
+}
"); err != nil {
diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go
index 50f7c6aa..5c2a1f4e 100644
--- a/vendor/golang.org/x/net/html/token.go
+++ b/vendor/golang.org/x/net/html/token.go
@@ -110,7 +110,7 @@ func (t Token) String() string {
 	case SelfClosingTagToken:
 		return "<" + t.tagString() + "/>"
 	case CommentToken:
-		return "<!--" + t.Data + "-->"
+		return "<!--" + escapeCommentString(t.Data) + "-->"
 	case DoctypeToken:
 		return "<!DOCTYPE " + t.Data + ">"
 	}
@@ -598,10 +598,10 @@ scriptDataDoubleEscapeEnd:
 // readComment reads the next comment token starting with "<!--". The opening
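The token.go changes in this last stretch tighten how malformed comments are tokenized: abruptly closed comments ("<!-->"-style endings) and "--!>" terminators now get spec-conformant data boundaries, and Token.String escapes comment data on the way back out. A small sketch that runs both shapes through the tokenizer; the exact Data values depend on the x/net revision in the vendor tree:

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// One abruptly closed comment and one terminated by "--!>", the two
	// cases the readComment changes above are concerned with.
	const doc = `<p>a</p><!-->oops<!-- b --!><p>c</p>`

	z := html.NewTokenizer(strings.NewReader(doc))
	for {
		tt := z.Next()
		if tt == html.ErrorToken {
			return // io.EOF once the input is fully consumed
		}
		if tt == html.CommentToken {
			fmt.Printf("comment data: %q\n", z.Token().Data)
		}
	}
}
```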