From 0a96666484c432529eb68e7d49d02ecab3621f9c Mon Sep 17 00:00:00 2001 From: dprotaso Date: Fri, 21 Jun 2024 11:08:12 -0400 Subject: [PATCH] drop legacy test packages that were once used by test-infra --- test/ci/ci.go | 44 + test/{prow/env_test.go => ci/ci_test.go} | 30 +- test/gcs/client.go | 51 -- test/gcs/gcs.go | 267 ------ test/gcs/helpers.go | 58 -- test/gcs/helpers_test.go | 121 --- test/gcs/mock/errors.go | 76 -- test/gcs/mock/mock.go | 449 ---------- test/gcs/mock/mock_example_test.go | 79 -- test/gcs/mock/mock_storage.go | 71 -- test/gcs/mock/mock_test.go | 1041 ---------------------- test/gcs/mock/test/upload | 1 - test/gke/addon.go | 55 -- test/gke/client.go | 126 --- test/gke/client_test.go | 59 -- test/gke/endpoint.go | 56 -- test/gke/endpoint_test.go | 48 - test/gke/fake/client.go | 205 ----- test/gke/fake/credentials.json | 6 - test/gke/location.go | 44 - test/gke/location_test.go | 59 -- test/gke/request.go | 170 ---- test/gke/request_test.go | 187 ---- test/gke/wait.go | 77 -- test/interactive/command.go | 81 -- test/interactive/command_test.go | 67 -- test/interactive/docker.go | 115 --- test/interactive/docker_test.go | 81 -- test/monitoring/doc.go | 32 - test/monitoring/monitoring.go | 84 -- test/prometheus/prometheus.go | 135 --- test/prometheus/prometheus_test.go | 77 -- test/prow/env.go | 59 -- test/prow/prow.go | 334 +------ test/prow/prow_test.go | 88 -- 35 files changed, 78 insertions(+), 4455 deletions(-) create mode 100644 test/ci/ci.go rename test/{prow/env_test.go => ci/ci_test.go} (52%) delete mode 100644 test/gcs/client.go delete mode 100644 test/gcs/gcs.go delete mode 100644 test/gcs/helpers.go delete mode 100644 test/gcs/helpers_test.go delete mode 100644 test/gcs/mock/errors.go delete mode 100644 test/gcs/mock/mock.go delete mode 100644 test/gcs/mock/mock_example_test.go delete mode 100644 test/gcs/mock/mock_storage.go delete mode 100644 test/gcs/mock/mock_test.go delete mode 100644 test/gcs/mock/test/upload delete mode 100644 test/gke/addon.go delete mode 100644 test/gke/client.go delete mode 100644 test/gke/client_test.go delete mode 100644 test/gke/endpoint.go delete mode 100644 test/gke/endpoint_test.go delete mode 100644 test/gke/fake/client.go delete mode 100644 test/gke/fake/credentials.json delete mode 100644 test/gke/location.go delete mode 100644 test/gke/location_test.go delete mode 100644 test/gke/request.go delete mode 100644 test/gke/request_test.go delete mode 100644 test/gke/wait.go delete mode 100644 test/interactive/command.go delete mode 100644 test/interactive/command_test.go delete mode 100644 test/interactive/docker.go delete mode 100644 test/interactive/docker_test.go delete mode 100644 test/monitoring/doc.go delete mode 100644 test/monitoring/monitoring.go delete mode 100644 test/prometheus/prometheus.go delete mode 100644 test/prometheus/prometheus_test.go delete mode 100644 test/prow/env.go delete mode 100644 test/prow/prow_test.go diff --git a/test/ci/ci.go b/test/ci/ci.go new file mode 100644 index 0000000000..7480be53de --- /dev/null +++ b/test/ci/ci.go @@ -0,0 +1,44 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ci + +import ( + "log" + "os" + "strings" +) + +const ( + // ArtifactsDir is the dir containing artifacts + ArtifactsDir = "artifacts" +) + +// IsCI returns whether the current environment is a CI environment. +func IsCI() bool { + return strings.EqualFold(os.Getenv("CI"), "true") +} + +// GetLocalArtifactsDir gets the artifacts directory where prow looks for artifacts. +// By default, it will look at the env var ARTIFACTS. +func GetLocalArtifactsDir() string { + dir := os.Getenv("ARTIFACTS") + if dir == "" { + log.Printf("Env variable ARTIFACTS not set. Using %s instead.", ArtifactsDir) + dir = ArtifactsDir + } + return dir +} diff --git a/test/prow/env_test.go b/test/ci/ci_test.go similarity index 52% rename from test/prow/env_test.go rename to test/ci/ci_test.go index 2b1ece3f90..4d570056b2 100644 --- a/test/prow/env_test.go +++ b/test/ci/ci_test.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Knative Authors +Copyright 2019 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,25 +14,31 @@ See the License for the specific language governing permissions and limitations under the License. */ -package prow +package ci import ( "testing" ) -func TestGetEnvConfig(t *testing.T) { +func TestIsCI(t *testing.T) { t.Setenv("CI", "true") - ec, err := GetEnvConfig() - t.Log("EnvConfig is:", ec) - if err != nil { - t.Fatal("Error getting envconfig for Prow:", err) + if ic := IsCI(); !ic { + t.Fatal("Expected: true, actual: false") } - if !ec.CI { - t.Fatal("Expected CI to be true but is false") +} + +func TestGetArtifacts(t *testing.T) { + // Test we can read from the env var + t.Setenv("ARTIFACTS", "test") + v := GetLocalArtifactsDir() + if v != "test" { + t.Fatalf("Actual artifacts dir: '%s' and Expected: 'test'", v) } - t.Setenv("CI", "false") - if _, err = GetEnvConfig(); err == nil { - t.Fatal("Expected an error if called from a non-CI environment but got nil") + // Test we can use the default + t.Setenv("ARTIFACTS", "") + v = GetLocalArtifactsDir() + if v != "artifacts" { + t.Fatalf("Actual artifacts dir: '%s' and Expected: 'artifacts'", v) } } diff --git a/test/gcs/client.go b/test/gcs/client.go deleted file mode 100644 index a78318abe8..0000000000 --- a/test/gcs/client.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package gcs - -import ( - "context" - - "cloud.google.com/go/storage" -) - -type Client interface { - // NewStorageBucket creates a new bucket in GCS with uniform access policy - NewStorageBucket(ctx context.Context, bkt, project string) error - // DeleteStorageBucket removes all children objects, force if not empty - DeleteStorageBucket(ctx context.Context, bkt string, force bool) error - // Exists check if an object exists under a bucket, assuming bucket exists - Exists(ctx context.Context, bkt, objPath string) bool - // ListChildrenFiles recursively lists all children files - ListChildrenFiles(ctx context.Context, bkt, dirPath string) ([]string, error) - // ListDirectChildren lists direct children paths (incl. files and dir) - ListDirectChildren(ctx context.Context, bkt, dirPath string) ([]string, error) - // AttrObject returns the object attributes - AttrObject(ctx context.Context, bkt, objPath string) (*storage.ObjectAttrs, error) - // CopyObject copies objects from one location to another, assuming both src and dst - // buckets both exist - CopyObject(ctx context.Context, srcBkt, srcObjPath, dstBkt, dstObjPath string) error - // ReadObject reads a GCS object and returns then contents in []byte - ReadObject(ctx context.Context, bkt, objPath string) ([]byte, error) - // WriteObject writes []byte content to a GCS object - WriteObject(ctx context.Context, bkt, objPath string, content []byte) (int, error) - // DeleteObject deletes an object - DeleteObject(ctx context.Context, bkt, objPath string) error - // Download downloads GCS object to a local file, assuming bucket exists - Download(ctx context.Context, bktName, objPath, filePath string) error - // Upload uploads a local file to a GCS object, assuming bucket exists - Upload(ctx context.Context, bktName, objPath, filePath string) error -} diff --git a/test/gcs/gcs.go b/test/gcs/gcs.go deleted file mode 100644 index fa0bba3801..0000000000 --- a/test/gcs/gcs.go +++ /dev/null @@ -1,267 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package gcs - -import ( - "context" - "errors" - "fmt" - "io" - "os" - "path" - "strings" - - "cloud.google.com/go/storage" - "google.golang.org/api/iterator" - "google.golang.org/api/option" -) - -// nolint // there's also Client so they collide. 
-type GCSClient struct { - *storage.Client -} - -// NewClient creates new GCS client with given service account -func NewClient(ctx context.Context, serviceAccount string) (*GCSClient, error) { - client, err := storage.NewClient(ctx, option.WithCredentialsFile(serviceAccount)) - if err != nil { - return nil, err - } - return &GCSClient{Client: client}, nil -} - -// NewStorageBucket creates a new bucket in GCS with uniform access policy -func (g *GCSClient) NewStorageBucket(ctx context.Context, bucketName, project string) error { - if project == "" { - return errors.New("a project must be provided") - } - - if bucketName == "" { - return errors.New("a bucket name must be provided") - } - - bucket := g.Bucket(bucketName) - - // For now, this creates a bucket with uniform policy across its objects to make ACL - // and permission management simple instead of object-level granularity that we currently - // do not use anyway. - bucketAttr := &storage.BucketAttrs{ - BucketPolicyOnly: storage.BucketPolicyOnly{ - Enabled: true, - }, - } - - return bucket.Create(ctx, project, bucketAttr) -} - -// DeleteStorageBucket removes all children objects and then deletes the bucket -func (g *GCSClient) DeleteStorageBucket(ctx context.Context, bucketName string, force bool) error { - children, err := g.ListChildrenFiles(ctx, bucketName, "") - if err != nil { - return err - } - - if len(children) == 0 && !force { - return fmt.Errorf("bucket %s not empty, please use force=true", bucketName) - } - - for _, child := range children { - if err := g.DeleteObject(ctx, bucketName, child); err != nil { - return err - } - } - return g.Bucket(bucketName).Delete(ctx) -} - -// get objects iterator under given storagePath and bucketName, use exclusionFilter to eliminate some files. -func (g *GCSClient) getObjectsIter(ctx context.Context, bucketName, storagePath, exclusionFilter string) *storage.ObjectIterator { - return g.Bucket(bucketName).Objects(ctx, &storage.Query{ - Prefix: storagePath, - Delimiter: exclusionFilter, - }) -} - -// Exists check if an object exists under a bucket, assuming bucket exists -func (g *GCSClient) Exists(ctx context.Context, bucketName, objPath string) bool { - // Check if this is a file - objHandle := g.Bucket(bucketName).Object(objPath) - if _, err := objHandle.Attrs(ctx); err == nil { - return true - } - - // Check if this is a directory, - // gcs directory paths are virtual paths, they automatically get deleted if there is no child file - _, err := g.getObjectsIter(ctx, bucketName, strings.TrimRight(objPath, " /")+"/", "").Next() - return err == nil -} - -// list child under storagePath, use exclusionFilter for skipping some files. -// This function gets all child files recursively under given storagePath, -// then filter out filenames containing given exclusionFilter. -// If exclusionFilter is empty string, returns all files but not directories, -// if exclusionFilter is "/", returns all direct children, including both files and directories. 
-// see https://godoc.org/cloud.google.com/go/storage#Query -func (g *GCSClient) list(ctx context.Context, bucketName, storagePath, exclusionFilter string) ([]string, error) { - objsAttrs, err := g.getObjectsAttrs(ctx, bucketName, storagePath, exclusionFilter) - if err != nil { - return nil, err - } - filePaths := make([]string, 0, len(objsAttrs)) - for _, attrs := range objsAttrs { - filePaths = append(filePaths, path.Join(attrs.Prefix, attrs.Name)) - } - return filePaths, nil -} - -// Query items under given gcs storagePath, use exclusionFilter to eliminate some files. -func (g *GCSClient) getObjectsAttrs(ctx context.Context, bucketName, storagePath, - exclusionFilter string) ([]*storage.ObjectAttrs, error) { - var allAttrs []*storage.ObjectAttrs - it := g.getObjectsIter(ctx, bucketName, storagePath, exclusionFilter) - - for { - attrs, err := it.Next() - if errors.Is(err, iterator.Done) { - break - } - if err != nil { - return nil, fmt.Errorf("error iterating: %w", err) - } - allAttrs = append(allAttrs, attrs) - } - return allAttrs, nil -} - -func (g *GCSClient) listChildren(ctx context.Context, bucketName, dirPath, exclusionFilter string) ([]string, error) { - if dirPath != "" { - dirPath = strings.TrimRight(dirPath, " /") + "/" - } - - return g.list(ctx, bucketName, dirPath, exclusionFilter) -} - -// ListChildrenFiles recursively lists all children files. -func (g *GCSClient) ListChildrenFiles(ctx context.Context, bucketName, dirPath string) ([]string, error) { - return g.listChildren(ctx, bucketName, dirPath, "") -} - -// ListDirectChildren lists direct children paths (including files and directories). -func (g *GCSClient) ListDirectChildren(ctx context.Context, bucketName, dirPath string) ([]string, error) { - // If there are 2 directories named "foo" and "foobar", - // then given storagePath "foo" will get files both under "foo" and "foobar". - // Add trailing slash to storagePath, so that only gets children under given directory. - return g.listChildren(ctx, bucketName, dirPath, "/") -} - -// CopyObject copies objects from one location to another. Assumes both source and destination buckets exist. 
-func (g *GCSClient) CopyObject(ctx context.Context, srcBucketName, srcPath, dstBucketName, dstPath string) error { - src := g.Bucket(srcBucketName).Object(srcPath) - dst := g.Bucket(dstBucketName).Object(dstPath) - - _, err := dst.CopierFrom(src).Run(ctx) - return err -} - -// Download gcs object to a file -func (g *GCSClient) Download(ctx context.Context, bucketName, objPath, dstPath string) error { - handle := g.Bucket(bucketName).Object(objPath) - if _, err := handle.Attrs(ctx); err != nil { - return err - } - - dst, err := os.OpenFile(dstPath, os.O_RDWR|os.O_CREATE, 0755) - if err != nil { - return err - } - src, err := handle.NewReader(ctx) - if err != nil { - return err - } - defer src.Close() - _, err = io.Copy(dst, src) - return err -} - -// Upload file to gcs object -func (g *GCSClient) Upload(ctx context.Context, bucketName, objPath, srcPath string) error { - src, err := os.Open(srcPath) - if err != nil { - return err - } - dst := g.Bucket(bucketName).Object(objPath).NewWriter(ctx) - defer dst.Close() - _, err = io.Copy(dst, src) - return err -} - -// AttrObject returns the object attributes -func (g *GCSClient) AttrObject(ctx context.Context, bucketName, objPath string) (*storage.ObjectAttrs, error) { - objHandle := g.Bucket(bucketName).Object(objPath) - return objHandle.Attrs(ctx) -} - -// ReadObject reads the content of a gcs object -func (g *GCSClient) ReadObject(ctx context.Context, bucketName, objPath string) ([]byte, error) { - var contents []byte - f, err := g.NewReader(ctx, bucketName, objPath) - if err != nil { - return contents, err - } - defer f.Close() - return io.ReadAll(f) -} - -// NewReader creates a new Reader of a gcs file. -// Important: caller must call Close on the returned Reader when done reading -func (g *GCSClient) NewReader(ctx context.Context, bucketName, objPath string) (*storage.Reader, error) { - o := g.Bucket(bucketName).Object(objPath) - if _, err := o.Attrs(ctx); err != nil { - return nil, err - } - return o.NewReader(ctx) -} - -// DeleteObject deletes an object -func (g *GCSClient) DeleteObject(ctx context.Context, bucketName, objPath string) error { - objHandle := g.Bucket(bucketName).Object(objPath) - return objHandle.Delete(ctx) -} - -// WriteObject writes the content to a gcs object -func (g *GCSClient) WriteObject(ctx context.Context, bucketName, objPath string, - content []byte) (n int, err error) { - objWriter := g.Bucket(bucketName).Object(objPath).NewWriter(ctx) - defer func() { - cerr := objWriter.Close() - if err == nil { - err = cerr - } - }() - - n, err = objWriter.Write(content) - return -} - -// ReadURL reads from a gsUrl and return a log structure -func (g *GCSClient) ReadURL(ctx context.Context, gcsURL string) ([]byte, error) { - bucket, obj, err := linkToBucketAndObject(gcsURL) - if err != nil { - return nil, err - } - - return g.ReadObject(ctx, bucket, obj) -} diff --git a/test/gcs/helpers.go b/test/gcs/helpers.go deleted file mode 100644 index 150999ed85..0000000000 --- a/test/gcs/helpers.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package gcs - -import ( - "fmt" - "net/url" - "path" - "strings" -) - -// get the bucket and object from the gsURL -func linkToBucketAndObject(gsURL string) (string, string, error) { - gsURL = strings.Replace(gsURL, "gs://", "", 1) - - sIdx := strings.IndexByte(gsURL, '/') - if sIdx == -1 || sIdx+1 >= len(gsURL) { - return "", "", fmt.Errorf("the gsUrl (%q) cannot be converted to bucket/object", gsURL) - } - - return gsURL[:sIdx], gsURL[sIdx+1:], nil -} - -// BuildLogPath returns the build log path from the test result gcsURL -func BuildLogPath(gcsURL string) (string, error) { - u, err := url.Parse(gcsURL) - if err != nil { - return gcsURL, err - } - u.Path = path.Join(u.Path, "build-log.txt") - return u.String(), nil -} - -// GetConsoleURL returns the gcs link renderable directly from a browser -func GetConsoleURL(gcsURL string) (string, error) { - u, err := url.Parse(gcsURL) - if err != nil { - return gcsURL, err - } - u.Path = path.Join("storage/browser", u.Host, u.Path) - u.Scheme = "https" - u.Host = "console.cloud.google.com" - return u.String(), nil -} diff --git a/test/gcs/helpers_test.go b/test/gcs/helpers_test.go deleted file mode 100644 index 5482e785fd..0000000000 --- a/test/gcs/helpers_test.go +++ /dev/null @@ -1,121 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package gcs - -import ( - "errors" - "reflect" - "testing" -) - -func TestGetConsoleURL(t *testing.T) { - tests := []struct { - name string - arg string - want string - }{ - { - name: "Missing protocol", - arg: "knative-prow/logs/ci-knative-docs-continuous/1132539579983728640/", - want: "https://console.cloud.google.com/storage/browser/knative-prow/logs/ci-knative-docs-continuous/1132539579983728640", - }, - { - name: "gs protocol", - arg: "gs://knative-prow/logs/ci-knative-client-go-coverage/1139250680293232640", - want: "https://console.cloud.google.com/storage/browser/knative-prow/logs/ci-knative-client-go-coverage/1139250680293232640", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got, _ := GetConsoleURL(tt.arg); got != tt.want { - t.Errorf("GetConsoleURL(%v), got: %v, want: %v", tt.arg, got, tt.want) - } - }) - } -} - -func TestBuildLogPath(t *testing.T) { - tests := []struct { - name string - arg string - want string - }{ - { - name: "Trailing slash", - arg: "gs://knative-prow/logs/ci-knative-client-go-coverage/1139250680293232640/", - want: "gs://knative-prow/logs/ci-knative-client-go-coverage/1139250680293232640/build-log.txt", - }, - { - name: "No Trailing slash", - arg: "gs://knative-prow/logs/ci-knative-client-go-coverage/1139250680293232640", - want: "gs://knative-prow/logs/ci-knative-client-go-coverage/1139250680293232640/build-log.txt", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got, _ := BuildLogPath(tt.arg); got != tt.want { - t.Errorf("BuildLogPath(%v), got: %v, want: %v", tt.arg, got, tt.want) - } - }) - } -} - -func TestLinkToBucketAndObject(t *testing.T) { - type result struct { - bucket string - object string - } - - tests := []struct { - name string - arg string - want *result - err error - }{ - { - name: "Valid gcsUrl", - arg: "gs://knative-prow/logs/ci-knative-client-go-coverage/1139250680293232640/build-log.txt", - want: &result{ - bucket: "knative-prow", - object: "logs/ci-knative-client-go-coverage/1139250680293232640/build-log.txt", - }, - err: nil, - }, - { - name: "Invalid gcsUrl - No slash", - arg: "knative-prow-no-object", - err: errors.New(`the gsUrl ("knative-prow-no-object") cannot be converted to bucket/object`), - }, - { - name: "Invalid gcsUrl - No object", - arg: "knative-prow/", - err: errors.New(`the gsUrl ("knative-prow/") cannot be converted to bucket/object`), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - b, o, err := linkToBucketAndObject(tt.arg) - got := &result{b, o} - if tt.err != nil && tt.err.Error() != err.Error() { - t.Errorf("linktoBucketAndObject(%v), got error: %v, want error: %v", tt.arg, err, tt.err) - } - if tt.err == nil && !reflect.DeepEqual(got, tt.want) { - t.Errorf("linktoBucketAndObject(%v), got: %v, want: %v", tt.arg, got, tt.want) - } - }) - } -} diff --git a/test/gcs/mock/errors.go b/test/gcs/mock/errors.go deleted file mode 100644 index fefda08c89..0000000000 --- a/test/gcs/mock/errors.go +++ /dev/null @@ -1,76 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mock - -import ( - "fmt" -) - -type notEmptyBucketError struct { - bkt string -} - -func (e *notEmptyBucketError) Error() string { - return fmt.Sprintf("bucket %q not empty, use force=true", e.bkt) -} - -func NewNotEmptyBucketError(bkt string) error { - return ¬EmptyBucketError{bkt} -} - -type noBucketError struct { - bkt string -} - -func NewNoBucketError(bkt string) error { - return &noBucketError{bkt} -} - -func (e *noBucketError) Error() string { - return fmt.Sprintf("no bucket %q", e.bkt) -} - -type bucketExistError struct { - bkt string -} - -func NewBucketExistError(bkt string) error { - return &bucketExistError{bkt} -} - -func (e *bucketExistError) Error() string { - return fmt.Sprintf("bucket %q already exists", e.bkt) -} - -type noObjectError struct { - bkt string - obj string - path string -} - -func NewNoObjectError(bkt, obj, path string) error { - return &noObjectError{ - bkt: bkt, - obj: obj, - path: path, - } -} - -func (e *noObjectError) Error() string { - return fmt.Sprintf("bucket %q does not contain object %q under path %q", - e.bkt, e.obj, e.path) -} diff --git a/test/gcs/mock/mock.go b/test/gcs/mock/mock.go deleted file mode 100644 index 7ee63f3dcc..0000000000 --- a/test/gcs/mock/mock.go +++ /dev/null @@ -1,449 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mock - -import ( - "context" - "os" - "path/filepath" - "strings" - - "cloud.google.com/go/storage" -) - -// I don't know if it is easier or not to use go mock, but we really only need two things: -// 1) Ability to mimic creation of buckets and objects -// 2) Ability to mimic returning errors -// -// We don't need arbitrary return values, so generators like go mock or testify might be -// overkill and doesn't give us the flexibility we need (e.g., will have to specify and -// and reason about the state after each call rather than just pretend we have this fake -// storage. The behavior of these commands, at the level of detail we care about, is pretty -// easy to replicate. - -var ( - // MethodNewStorageBucket mocks MethodNewStorageBucket. - MethodNewStorageBucket = Method("NewStorageBucket") - // MethodDeleteStorageBucket mocks NewDeleteStorageBucket. - MethodDeleteStorageBucket = Method("NewDeleteStorageBucket") - // MethodListChildrenFiles mocks ListChildrenFiles. - MethodListChildrenFiles = Method("ListChildrenFiles") - // MethodListDirectChildren mocks ListDirectChildren. - MethodListDirectChildren = Method("ListDirectChildren") - // MethodAttrObject mocks AttrObject. - MethodAttrObject = Method("AttrObject") - // MethodCopyObject mocks CopyObject. - MethodCopyObject = Method("CopyObject") - // MethodReadObject mocks ReadObject. - MethodReadObject = Method("ReadObject") - // MethodWriteObject mocks WriteObject. - MethodWriteObject = Method("WriteObject") - // MethodDeleteObject mocks DeleteObject. 
- MethodDeleteObject = Method("DeleteObject") - // MethodDownload mocks Download. - MethodDownload = Method("Download") - // MethodUpload mocks Upload. - MethodUpload = Method("Upload") -) - -// mock GCS Client -type clientMocker struct { - // project with buckets - gcp map[project]*buckets - // error map - // - on each call of the higher level function that calls any number of methods - // in this library, you can use SetError(map[Method]*ReturnError) or ClearError() - // to create the error return values you want. Default is nil. - err map[Method]*ReturnError - - // reverse index to lookup which project a bucket is under as GCS has a global - // bucket namespace. - revIndex map[bucket]project -} - -func newClientMocker() *clientMocker { - return &clientMocker{ - gcp: make(map[project]*buckets), - err: make(map[Method]*ReturnError), - revIndex: make(map[bucket]project), - } -} - -// SetError sets the number of calls of an interface function before an error is returned. -// Otherwise it will return the err of the mock function itself (which is usually nil). -func (c *clientMocker) SetError(m map[Method]*ReturnError) { - c.err = m -} - -// ClearError clears the error map in mock client -func (c *clientMocker) ClearError() { - for k := range c.err { - // Apparently Go is okay with deleting keys as you iterate. - delete(c.err, k) - } -} - -// getError is a helper that returns the error if it is set for this function -func (c *clientMocker) getError(funcName Method) (bool, error) { - if val, ok := c.err[funcName]; ok { - if val.NumCall == 0 { - delete(c.err, funcName) - return true, val.Err - } - val.NumCall-- - } - return false, nil -} - -// getBucketRoot is a helper that returns the objects bucket if it exists -func (c *clientMocker) getBucketRoot(bkt string) *objects { - p, ok := c.revIndex[bucket(bkt)] - if !ok { - return nil - } - - bktRoot, ok := c.gcp[p].bkt[bucket(bkt)] - if !ok { - return nil - } - return bktRoot -} - -// NewStorageBucket mock creates a new storage bucket in gcp -func (c *clientMocker) NewStorageBucket(ctx context.Context, bkt, projectName string) error { - if override, err := c.getError(MethodNewStorageBucket); override { - return err - } - - p := project(projectName) - - if _, ok := c.revIndex[bucket(bkt)]; ok { - return NewBucketExistError(bkt) - } - - if _, ok := c.gcp[p]; !ok { - c.gcp[p] = &buckets{ - bkt: make(map[bucket]*objects), - } - } - c.gcp[p].bkt[bucket(bkt)] = &objects{ - obj: make(map[mockpath]*object), - } - c.revIndex[bucket(bkt)] = p - return nil -} - -// DeleteStorageBucket mock deletes a storage bucket from gcp, force if not empty -func (c *clientMocker) DeleteStorageBucket(ctx context.Context, bkt string, force bool) error { - if override, err := c.getError(MethodDeleteStorageBucket); override { - return err - } - - bktName := bucket(bkt) - - p, ok := c.revIndex[bktName] - if !ok { - return NewNoBucketError(bkt) - } - - if len(c.gcp[p].bkt) != 0 && !force { - return NewNotEmptyBucketError(bkt) - } - - delete(c.gcp[p].bkt, bktName) - delete(c.revIndex, bktName) - return nil -} - -// Exists mock check if an object exists -func (c *clientMocker) Exists(ctx context.Context, bkt, objPath string) bool { - bktRoot := c.getBucketRoot(bkt) - if bktRoot == nil { - return false - } - - // just the bucket - if objPath == "" { - return true - } - - dir, obj := filepath.Split(objPath) - if _, ok := bktRoot.obj[newMockPath(dir, obj)]; ok { - return true - } - - // could be asking for if a directory exists. 
Since our structure is flat, at - // path of an object containing the searched for directory as its subpath means - // the directory "exists" - // NOTE: this is inefficient....but we are not scale testing with mock anyway. - for k := range bktRoot.obj { - if strings.HasPrefix(k.dir, objPath) { - return true - } - } - return false -} - -// ListChildrenFiles mock lists all children recursively -func (c *clientMocker) ListChildrenFiles(ctx context.Context, bkt, dirPath string) ([]string, error) { - if override, err := c.getError(MethodListChildrenFiles); override { - return nil, err - } - - bktRoot := c.getBucketRoot(bkt) - if bktRoot == nil { - return nil, NewNoBucketError(bkt) - } - - if dirPath != "" { - dirPath = strings.TrimRight(dirPath, " /") + "/" - } - var children []string - for k := range bktRoot.obj { - if strings.HasPrefix(k.dir, dirPath) { - children = append(children, k.toString()) - } - } - - return children, nil -} - -// mock lists all direct children recursively -func (c *clientMocker) ListDirectChildren(ctx context.Context, bkt, dirPath string) ([]string, error) { - if override, err := c.getError(MethodListDirectChildren); override { - return nil, err - } - - bktRoot := c.getBucketRoot(bkt) - if bktRoot == nil { - return nil, NewNoBucketError(bkt) - } - - if dirPath != "" { - dirPath = strings.TrimRight(dirPath, " /") + "/" - } - var children []string - for k := range bktRoot.obj { - if k.dir == dirPath { - children = append(children, k.toString()) - } - } - - return children, nil -} - -// AttrObject mock returns the attribute of an object -func (c *clientMocker) AttrObject(ctx context.Context, bkt, objPath string) (*storage.ObjectAttrs, error) { - if override, err := c.getError(MethodAttrObject); override { - return nil, err - } - - bktRoot := c.getBucketRoot(bkt) - if bktRoot == nil { - return nil, NewNoBucketError(bkt) - } - - dir, obj := filepath.Split(objPath) - if obj == "" { - return nil, NewNoObjectError(bkt, obj, dir) - } - o, ok := bktRoot.obj[newMockPath(dir, obj)] - if !ok { - return nil, NewNoObjectError(bkt, obj, dir) - } - - return &storage.ObjectAttrs{ - Bucket: bkt, - Name: objPath, - Size: int64(len(o.content)), - }, nil -} - -// CopyObject mocks the copying of one object to another -func (c *clientMocker) CopyObject(ctx context.Context, srcBkt, srcObjPath, dstBkt, dstObjPath string) error { - if override, err := c.getError(MethodCopyObject); override { - return err - } - - srcBktRoot := c.getBucketRoot(srcBkt) - if srcBktRoot == nil { - return NewNoBucketError(srcBkt) - } - - dstBktRoot := c.getBucketRoot(dstBkt) - if dstBktRoot == nil { - return NewNoBucketError(dstBkt) - } - - srcDir, srcObjName := filepath.Split(srcObjPath) - if srcObjName == "" { - return NewNoObjectError(srcBkt, srcObjName, srcDir) - } - - dstDir, dstObjName := filepath.Split(dstObjPath) - if dstObjName == "" { - return NewNoObjectError(dstBkt, dstObjName, dstDir) - } - - srcMockPath := newMockPath(srcDir, srcObjName) - dstMockPath := newMockPath(dstDir, dstObjName) - - srcObj, ok := srcBktRoot.obj[srcMockPath] - if !ok { - return NewNoObjectError(srcBkt, srcObjName, srcDir) - } - - dstBktRoot.obj[dstMockPath] = &object{ - name: srcObj.name, - bkt: dstBkt, - content: make([]byte, len(srcBktRoot.obj[srcMockPath].content)), - } - copy(dstBktRoot.obj[dstMockPath].content, srcBktRoot.obj[srcMockPath].content) - return nil -} - -// ReadObject mocks reading from an object -func (c *clientMocker) ReadObject(ctx context.Context, bkt, objPath string) ([]byte, error) { - if override, err := 
c.getError(MethodReadObject); override { - return nil, err - } - - bktRoot := c.getBucketRoot(bkt) - if bktRoot == nil { - return nil, NewNoBucketError(bkt) - } - - dir, objName := filepath.Split(objPath) - if objName == "" { - return nil, NewNoObjectError(bkt, objName, dir) - } - - obj, ok := bktRoot.obj[newMockPath(dir, objName)] - if !ok { - return nil, NewNoObjectError(bkt, objName, dir) - } - - return obj.content, nil -} - -// WriteObject mocks writing to an object -func (c *clientMocker) WriteObject(ctx context.Context, bkt, objPath string, content []byte) (int, error) { - if override, err := c.getError(MethodWriteObject); override { - return -1, err - } - - bktRoot := c.getBucketRoot(bkt) - if bktRoot == nil { - return -1, NewNoBucketError(bkt) - } - - dir, objName := filepath.Split(objPath) - if objName == "" { - return -1, NewNoObjectError(bkt, objName, dir) - } - - mockPath := newMockPath(dir, objName) - bktRoot.obj[mockPath] = &object{ - name: mockPath, - bkt: bkt, - content: make([]byte, len(content)), - } - copy(bktRoot.obj[mockPath].content, content) - return len(content), nil -} - -// DeleteObject mocks deleting an object -func (c *clientMocker) DeleteObject(ctx context.Context, bkt, objPath string) error { - if override, err := c.getError(MethodDeleteObject); override { - return err - } - - bktRoot := c.getBucketRoot(bkt) - if bktRoot == nil { - return nil - } - - dir, objName := filepath.Split(objPath) - if objName == "" { - return nil - } - - delete(bktRoot.obj, newMockPath(dir, objName)) - return nil -} - -// Download mocks downloading an object to a local file -func (c *clientMocker) Download(ctx context.Context, bkt, objPath, filePath string) error { - if override, err := c.getError(MethodDownload); override { - return err - } - - bktRoot := c.getBucketRoot(bkt) - if bktRoot == nil { - return NewNoBucketError(bkt) - } - - dir, objName := filepath.Split(objPath) - if objName == "" { - return NewNoObjectError(bkt, objName, dir) - } - - obj, ok := bktRoot.obj[newMockPath(dir, objName)] - if !ok { - return NewNoObjectError(bkt, objName, dir) - } - - f, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE, 0755) - if err != nil { - return err - } - defer f.Close() - - _, err = f.Write(obj.content) - return err -} - -// Upload mocks uploading a local file to an object -func (c *clientMocker) Upload(ctx context.Context, bkt, objPath, filePath string) error { - if override, err := c.getError(MethodUpload); override { - return err - } - - bktRoot := c.getBucketRoot(bkt) - if bktRoot == nil { - return NewNoBucketError(bkt) - } - - dir, objName := filepath.Split(objPath) - if objName == "" { - return NewNoObjectError(bkt, objName, dir) - } - - content, err := os.ReadFile(filePath) - if err != nil { - return err - } - - mockPath := newMockPath(dir, objName) - bktRoot.obj[mockPath] = &object{ - name: mockPath, - bkt: bkt, - content: make([]byte, len(content)), - } - copy(bktRoot.obj[mockPath].content, content) - return nil -} diff --git a/test/gcs/mock/mock_example_test.go b/test/gcs/mock/mock_example_test.go deleted file mode 100644 index ef7ff43a5c..0000000000 --- a/test/gcs/mock/mock_example_test.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mock - -import ( - "context" - "fmt" - - "knative.dev/pkg/test/gcs" -) - -// Example on how to override errors -const ( - bkt = "NewBkt" - proj = "NewProject" -) - -func topFunction(c gcs.Client) error { - ctx := context.Background() - if err := c.NewStorageBucket(ctx, bkt, proj); err != nil { - return err - } - - // Should have returned error, but SetError override to nil - if _, err := c.ReadObject(ctx, bkt, "non-existent-file"); err != nil { - return err - } - - if _, err := c.ListChildrenFiles(ctx, bkt, ""); err != nil { - return err - } - - if _, err := c.ListChildrenFiles(ctx, bkt, ""); err != nil { - return err - } - - // Should not have returned error, but SetError override to NewNoBucketError(bkt) - if _, err := c.ListChildrenFiles(ctx, bkt, ""); err != nil { - return err - } - - return nil -} - -func Example_topFunction() { - mockClient := newClientMocker() - - // Call to ReadObject, first call should return error, but returns nil - // because it is overridden. - mockClient.SetError( - map[Method]*ReturnError{ - MethodReadObject: { - NumCall: uint8(0), - Err: nil, - }, - MethodListChildrenFiles: { - NumCall: uint8(2), - Err: NewNoBucketError(bkt), - }, - }) - - fmt.Println(topFunction(mockClient)) - // Output: - // no bucket "NewBkt" -} diff --git a/test/gcs/mock/mock_storage.go b/test/gcs/mock/mock_storage.go deleted file mode 100644 index 7036945cdf..0000000000 --- a/test/gcs/mock/mock_storage.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package mock - -import ( - "path/filepath" -) - -// more friendly type casts for better readability of what some strings are -type project string -type bucket string -type Method string - -// mockpath contains the bucket path to an object and the object name -type mockpath struct { - dir string - obj string -} - -func newMockPath(dir, obj string) mockpath { - return mockpath{ - dir: dir, - obj: obj, - } -} - -// toString stringify mockpath -func (m mockpath) toString() string { - return filepath.Join(m.dir, m.obj) -} - -// Fake GCS objects -type object struct { - name mockpath - //NOTE: current ObjectAttrs supported: - // Size - // Bucket - // Name - bkt string - content []byte -} - -// bucket of objects - structure is flat -type objects struct { - obj map[mockpath]*object -} - -// project with buckets -type buckets struct { - bkt map[bucket]*objects -} - -// ReturnError is a custom error for specific methods -type ReturnError struct { - NumCall uint8 - Err error -} diff --git a/test/gcs/mock/mock_test.go b/test/gcs/mock/mock_test.go deleted file mode 100644 index 40eb777435..0000000000 --- a/test/gcs/mock/mock_test.go +++ /dev/null @@ -1,1041 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mock - -import ( - "bytes" - "context" - "errors" - "fmt" - "os" - "path" - "reflect" - "sort" - "testing" -) - -func TestSetError(t *testing.T) { - ctx := context.Background() - const ( - bkt = "fake" - project = "fake" - dirPath = "/" - ) - - m := map[Method]*ReturnError{ - MethodNewStorageBucket: { - NumCall: 2, - Err: fmt.Errorf("MethodNewStorageBucket Error"), - }, - MethodDeleteStorageBucket: { - NumCall: 1, - Err: fmt.Errorf("MethodDeleteStorageBucketError"), - }, - MethodListChildrenFiles: { - NumCall: 0, - Err: fmt.Errorf("MethodListChildrenFilesError"), - }, - MethodListDirectChildren: { - NumCall: 1, - Err: fmt.Errorf("MethodListDirectChildrenError"), - }, - MethodAttrObject: { - NumCall: 2, - Err: fmt.Errorf("MethodAttrObjectError"), - }, - MethodCopyObject: { - NumCall: 3, - Err: fmt.Errorf("MethodCopyObjectError"), - }, - MethodReadObject: { - NumCall: 2, - Err: fmt.Errorf("MethodReadObjectError"), - }, - MethodWriteObject: { - NumCall: 1, - Err: fmt.Errorf("MethodWriteObjectError"), - }, - MethodDeleteObject: { - NumCall: 0, - Err: fmt.Errorf("MethodDeleteObjectError"), - }, - MethodDownload: { - NumCall: 1, - Err: fmt.Errorf("MethodDownload"), - }, - MethodUpload: { - NumCall: 2, - Err: fmt.Errorf("MethodUpload"), - }, - } - - mockClient := newClientMocker() - mockClient.SetError(m) - - for k, v := range m { - switch numCall := int(v.NumCall); k { - case MethodNewStorageBucket: - for i := 0; i < numCall; i++ { - mockClient.NewStorageBucket(ctx, bkt, project) - } - - if err := mockClient.NewStorageBucket(ctx, bkt, project); err == nil { - t.Error("Expected error", v.Err) - } else if err.Error() != v.Err.Error() { - t.Errorf("Expected error %v, got error %v", v.Err, err) - } - case MethodDeleteStorageBucket: - for i := 0; i < numCall; i++ { - 
mockClient.DeleteStorageBucket(ctx, bkt, true) - } - - if err := mockClient.DeleteStorageBucket(ctx, bkt, true); err == nil { - t.Error("Expected error", v.Err) - } else if err.Error() != v.Err.Error() { - t.Errorf("Expected error %v, got error %v", v.Err, err) - } - case MethodListChildrenFiles: - for i := 0; i < numCall; i++ { - mockClient.ListChildrenFiles(ctx, bkt, dirPath) - } - - if _, err := mockClient.ListChildrenFiles(ctx, bkt, dirPath); err == nil { - t.Error("Expected error", v.Err) - } else if err.Error() != v.Err.Error() { - t.Errorf("Expected error %v, got error %v", v.Err, err) - } - case MethodListDirectChildren: - for i := 0; i < numCall; i++ { - mockClient.ListDirectChildren(ctx, bkt, dirPath) - } - - if _, err := mockClient.ListDirectChildren(ctx, bkt, dirPath); err == nil { - t.Error("Expected error", v.Err) - } else if err.Error() != v.Err.Error() { - t.Errorf("Expected error %v, got error %v", v.Err, err) - } - case MethodAttrObject: - for i := 0; i < numCall; i++ { - mockClient.AttrObject(ctx, bkt, dirPath) - } - - if _, err := mockClient.AttrObject(ctx, bkt, dirPath); err == nil { - t.Error("Expected error", v.Err) - } else if err.Error() != v.Err.Error() { - t.Errorf("Expected error %v, got error %v", v.Err, err) - } - case MethodCopyObject: - for i := 0; i < numCall; i++ { - mockClient.CopyObject(ctx, bkt, dirPath, bkt, dirPath) - } - - if err := mockClient.CopyObject(ctx, bkt, dirPath, bkt, dirPath); err == nil { - t.Error("Expected error", v.Err) - } else if err.Error() != v.Err.Error() { - t.Errorf("Expected error %v, got error %v", v.Err, err) - } - case MethodReadObject: - for i := 0; i < numCall; i++ { - mockClient.ReadObject(ctx, bkt, dirPath) - } - - if _, err := mockClient.ReadObject(ctx, bkt, dirPath); err == nil { - t.Error("Expected error", v.Err) - } else if err.Error() != v.Err.Error() { - t.Errorf("Expected error %v, got error %v", v.Err, err) - } - case MethodWriteObject: - for i := 0; i < numCall; i++ { - mockClient.WriteObject(ctx, bkt, dirPath, []byte{}) - } - - if _, err := mockClient.WriteObject(ctx, bkt, dirPath, []byte{}); err == nil { - t.Error("Expected error", v.Err) - } else if err.Error() != v.Err.Error() { - t.Errorf("Expected error %v, got error %v", v.Err, err) - } - case MethodDeleteObject: - for i := 0; i < numCall; i++ { - mockClient.DeleteObject(ctx, bkt, dirPath) - } - - if err := mockClient.DeleteObject(ctx, bkt, dirPath); err == nil { - t.Error("Expected error", v.Err) - } else if err.Error() != v.Err.Error() { - t.Errorf("Expected error %v, got error %v", v.Err, err) - } - case MethodDownload: - for i := 0; i < numCall; i++ { - mockClient.Download(ctx, bkt, dirPath, dirPath) - } - - if err := mockClient.Download(ctx, bkt, dirPath, dirPath); err == nil { - t.Error("Expected error", v.Err) - } else if err.Error() != v.Err.Error() { - t.Errorf("Expected error %v, got error %v", v.Err, err) - } - case MethodUpload: - for i := 0; i < numCall; i++ { - mockClient.Upload(ctx, bkt, dirPath, dirPath) - } - - if err := mockClient.Upload(ctx, bkt, dirPath, dirPath); err == nil { - t.Error("Expected error", v.Err) - } else if err.Error() != v.Err.Error() { - t.Errorf("Expected error %v, got error %v", v.Err, err) - } - default: - t.Errorf("unknown method") - } - } -} - -func TestClearError(t *testing.T) { - ctx := context.Background() - bkt := "fake" - project := "fake" - dirPath := "/" - - testCases := []struct { - testname string - m map[Method]*ReturnError //error map to load into mockClient - }{ - { - testname: "set errors for 
methods", - m: map[Method]*ReturnError{ - MethodNewStorageBucket: { - NumCall: 0, - Err: fmt.Errorf("MethodNewStorageBucket Error"), - }, - MethodDeleteStorageBucket: { - NumCall: 0, - Err: fmt.Errorf("MethodDeleteStorageBucketError"), - }, - MethodListChildrenFiles: { - NumCall: 0, - Err: fmt.Errorf("MethodListChildrenFilesError"), - }, - MethodListDirectChildren: { - NumCall: 0, - Err: fmt.Errorf("MethodListDirectChildrenError"), - }, - MethodAttrObject: { - NumCall: 0, - Err: fmt.Errorf("MethodAttrObjectError"), - }, - MethodCopyObject: { - NumCall: 0, - Err: fmt.Errorf("MethodCopyObjectError"), - }, - MethodReadObject: { - NumCall: 0, - Err: fmt.Errorf("MethodReadObjectError"), - }, - MethodWriteObject: { - NumCall: 0, - Err: fmt.Errorf("MethodWriteObjectError"), - }, - MethodDeleteObject: { - NumCall: 0, - Err: fmt.Errorf("MethodDeleteObjectError"), - }, - MethodDownload: { - NumCall: 0, - Err: fmt.Errorf("MethodDownload"), - }, - MethodUpload: { - NumCall: 0, - Err: fmt.Errorf("MethodUpload"), - }, - }, - }, - } - - for _, tt := range testCases { - t.Run(tt.testname, func(t *testing.T) { - mockClient := newClientMocker() - mockClient.SetError(tt.m) - mockClient.ClearError() - - for k, v := range tt.m { - switch k { - case MethodNewStorageBucket: - if err := mockClient.NewStorageBucket(ctx, bkt, project); err != nil && err.Error() == v.Err.Error() { - t.Errorf("error %v should have been cleared", v.Err) - } - case MethodDeleteStorageBucket: - if err := mockClient.DeleteStorageBucket(ctx, bkt, true); err != nil && err.Error() == v.Err.Error() { - t.Errorf("error %v should have been cleared", v.Err) - } - case MethodListChildrenFiles: - if _, err := mockClient.ListChildrenFiles(ctx, bkt, dirPath); err != nil && err.Error() == v.Err.Error() { - t.Errorf("error %v should have been cleared", v.Err) - } - case MethodListDirectChildren: - if _, err := mockClient.ListDirectChildren(ctx, bkt, dirPath); err != nil && err.Error() == v.Err.Error() { - t.Errorf("error %v should have been cleared", v.Err) - } - case MethodAttrObject: - if _, err := mockClient.AttrObject(ctx, bkt, dirPath); err != nil && err.Error() == v.Err.Error() { - t.Errorf("error %v should have been cleared", v.Err) - } - case MethodCopyObject: - if err := mockClient.CopyObject(ctx, bkt, dirPath, bkt, dirPath); err != nil && err.Error() == v.Err.Error() { - t.Errorf("error %v should have been cleared", v.Err) - } - case MethodReadObject: - if _, err := mockClient.ReadObject(ctx, bkt, dirPath); err != nil && err.Error() == v.Err.Error() { - t.Errorf("error %v should have been cleared", v.Err) - } - case MethodWriteObject: - if _, err := mockClient.WriteObject(ctx, bkt, dirPath, []byte{}); err != nil && err.Error() == v.Err.Error() { - t.Errorf("error %v should have been cleared", v.Err) - } - case MethodDeleteObject: - if err := mockClient.DeleteObject(ctx, bkt, dirPath); err != nil && err.Error() == v.Err.Error() { - t.Errorf("error %v should have been cleared", v.Err) - } - case MethodDownload: - if err := mockClient.Download(ctx, bkt, dirPath, dirPath); err != nil && err.Error() == v.Err.Error() { - t.Errorf("error %v should have been cleared", v.Err) - } - case MethodUpload: - if err := mockClient.Upload(ctx, bkt, dirPath, dirPath); err != nil && err.Error() == v.Err.Error() { - t.Errorf("error %v should have been cleared", v.Err) - } - default: - t.Errorf("unknown method") - } - } - }) - } -} - -func TestNewStorageBucket(t *testing.T) { - ctx := context.Background() - mockClient := newClientMocker() - const ( - bktName1 = 
"test-bucket1" - project1 = "test-project1" - bktName2 = "test-bucket2" - project2 = "test-project2" - ) - - testCases := []struct { - testname string - bkt string - projectName string - err error - }{{ - testname: "createNewBucket", - bkt: bktName1, - projectName: project1, - }, { - testname: "ExistingNewBucket", - bkt: bktName1, - projectName: project1, - err: NewBucketExistError(bktName1), - }, { - testname: "ExistingNewBucketDifferentProject", - bkt: bktName1, - projectName: project2, - err: NewBucketExistError(bktName1), - }, { - testname: "secondNewBucket", - bkt: bktName2, - projectName: project1, - }} - - for _, tt := range testCases { - t.Run(tt.testname, func(t *testing.T) { - err := mockClient.NewStorageBucket(ctx, tt.bkt, tt.projectName) - if (tt.err == nil || err == nil) && !errors.Is(err, tt.err) { - t.Fatalf("Expected error %v, got error %v", tt.err, err) - } else if (tt.err != nil && err != nil) && tt.err.Error() != err.Error() { - t.Fatalf("Expected error %v, got error %v", tt.err, err) - } - - if tt.err != nil { - return - } - - if p, ok := mockClient.revIndex[bucket(tt.bkt)]; !ok { - t.Fatal("Expected revIndex to contain key", bucket(tt.bkt)) - } else if p != project(tt.projectName) { - t.Fatalf("Expected revIndex value %v, got %v", project(tt.projectName), p) - } - - if p, ok := mockClient.gcp[project(tt.projectName)]; !ok { - t.Fatal("Expected gcp to contain key", project(tt.projectName)) - } else if _, ok := p.bkt[bucket(tt.bkt)]; !ok { - t.Fatal("Expected gcp.bucket to contain key", bucket(tt.bkt)) - } - }) - } -} - -func TestDeleteStorageBucket(t *testing.T) { - ctx := context.Background() - mockClient := newClientMocker() - const ( - bktName1 = "test-bucket1" - project1 = "test-project1" - ) - - mockClient.NewStorageBucket(ctx, bktName1, project1) - mockClient.WriteObject(ctx, bktName1, "object1", []byte("Hello")) - - testCases := []struct { - testname string - bkt string - force bool - err error - }{{ - testname: "deleteBucket", - bkt: bktName1, - err: NewNotEmptyBucketError(bktName1), - }, { - testname: "deleteBucket", - bkt: bktName1, - force: true, - }, { - testname: "deleteNonExistentBucket", - bkt: bktName1, - err: NewNoBucketError(bktName1), - }} - - for _, tt := range testCases { - t.Run(tt.testname, func(t *testing.T) { - err := mockClient.DeleteStorageBucket(ctx, tt.bkt, tt.force) - if (tt.err == nil || err == nil) && !errors.Is(err, tt.err) { - t.Fatalf("Expected error %v, got error %v", tt.err, err) - } else if (tt.err != nil && err != nil) && tt.err.Error() != err.Error() { - t.Fatalf("Expected error %v, got error %v", tt.err, err) - } - - if tt.err != nil { - return - } - - if _, ok := mockClient.revIndex[bucket(tt.bkt)]; ok { - t.Fatalf("bucket %v should have been deleted", bucket(tt.bkt)) - } - - if _, ok := mockClient.gcp[project(project1)].bkt[bucket(tt.bkt)]; ok { - t.Fatalf("bucket %v should have been deleted", bucket(tt.bkt)) - } - }) - } -} - -func TestExists(t *testing.T) { - ctx := context.Background() - mockClient := newClientMocker() - const ( - bktName1 = "test-bucket1" - project1 = "test-project1" - object1 = "object1" - dir1 = "dir" - ) - content := []byte("Hello World") - - mockClient.NewStorageBucket(ctx, bktName1, project1) - mockClient.WriteObject(ctx, bktName1, path.Join(dir1, object1), content) - - testCases := []struct { - testname string - bkt string - objpath string - exist bool - }{{ - testname: "ExistObject", - bkt: bktName1, - objpath: path.Join(dir1, object1), - exist: true, - }, { - testname: "ExistBucket", - bkt: 
bktName1, - objpath: "", - exist: true, - }, { - testname: "ExistDir", - bkt: bktName1, - objpath: "dir", - exist: true, - }, { - testname: "nonexistentObject", - bkt: bktName1, - objpath: "badobjectpath", - exist: false, - }, { - testname: "nonexistentBkt", - bkt: "non-existent-bucket", - exist: false, - }} - for _, tt := range testCases { - t.Run(tt.testname, func(t *testing.T) { - if exist := mockClient.Exists(ctx, tt.bkt, tt.objpath); exist != tt.exist { - t.Fatalf("Expected exist %v to return %v, got %v", tt.objpath, tt.exist, exist) - } - }) - } -} - -func TestListChildrenFiles(t *testing.T) { - ctx := context.Background() - mockClient := newClientMocker() - bktName1 := "test-bucket1" - project1 := "test-project1" - dir1 := "dir" - dir2 := "dir/subdir" - object1 := path.Join(dir1, "object1") - object2 := path.Join(dir1, "object2") - object3 := path.Join(dir2, "object3") - content := []byte("Hello World") - - mockClient.NewStorageBucket(ctx, bktName1, project1) - mockClient.WriteObject(ctx, bktName1, object1, content) - mockClient.WriteObject(ctx, bktName1, object2, content) - mockClient.WriteObject(ctx, bktName1, object3, content) - - testCases := []struct { - testname string - bkt string - dir string - expected []string - err error - }{{ - testname: "listAllChildrenObjects", - bkt: bktName1, - dir: "dir", - expected: []string{object1, object2, object3}, - }, { - testname: "listAllChildrenObjects", - bkt: bktName1, - expected: []string{object1, object2, object3}, - }, { - testname: "badBucket", - bkt: "non-existent-bucket", - err: NewNoBucketError("non-existent-bucket"), - }} - - for _, tt := range testCases { - t.Run(tt.testname, func(t *testing.T) { - children, err := mockClient.ListChildrenFiles(ctx, tt.bkt, tt.dir) - if (tt.err == nil || err == nil) && !errors.Is(err, tt.err) { - t.Fatalf("Expected error %v, got error %v", tt.err, err) - } else if (tt.err != nil && err != nil) && tt.err.Error() != err.Error() { - t.Fatalf("Expected error %v, got error %v", tt.err, err) - } - - sort.Strings(children) - sort.Strings(tt.expected) - if !reflect.DeepEqual(children, tt.expected) { - t.Fatalf("Expected %v, got %v", tt.expected, children) - } - - }) - } -} - -func TestListDirectChildren(t *testing.T) { - ctx := context.Background() - mockClient := newClientMocker() - const ( - bktName1 = "test-bucket1" - project1 = "test-project1" - dir1 = "dir" - dir2 = "dir/subdir" - object4 = "object4" - ) - object1 := path.Join(dir1, "object1") - object2 := path.Join(dir1, "object2") - object3 := path.Join(dir2, "object3") - content := []byte("Hello World") - - mockClient.NewStorageBucket(ctx, bktName1, project1) - mockClient.WriteObject(ctx, bktName1, object1, content) - mockClient.WriteObject(ctx, bktName1, object2, content) - mockClient.WriteObject(ctx, bktName1, object3, content) - mockClient.WriteObject(ctx, bktName1, object4, content) - - testCases := []struct { - testname string - bkt string - dir string - expected []string - err error - }{{ - testname: "listAllChildrenObjects", - bkt: bktName1, - dir: "dir", - expected: []string{object1, object2}, - }, { - testname: "listAllChildrenObjects", - bkt: bktName1, - expected: []string{object4}, - }, { - testname: "badBucket", - bkt: "non-existent-bucket", - err: NewNoBucketError("non-existent-bucket"), - }} - - for _, tt := range testCases { - t.Run(tt.testname, func(t *testing.T) { - children, err := mockClient.ListDirectChildren(ctx, tt.bkt, tt.dir) - if (tt.err == nil || err == nil) && !errors.Is(err, tt.err) { - t.Fatalf("Expected error %v, got 
error %v", tt.err, err) - } else if (tt.err != nil && err != nil) && tt.err.Error() != err.Error() { - t.Fatalf("Expected error %v, got error %v", tt.err, err) - } - - sort.Strings(children) - sort.Strings(tt.expected) - if !reflect.DeepEqual(children, tt.expected) { - t.Fatalf("Expected %v, got %v", tt.expected, children) - } - - }) - } -} - -func TestAttrObject(t *testing.T) { - ctx := context.Background() - mockClient := newClientMocker() - const ( - bktName1 = "test-bucket1" - project1 = "test-project1" - object1 = "dir/object1" - ) - content := []byte("Hello World") - - mockClient.NewStorageBucket(ctx, bktName1, project1) - mockClient.WriteObject(ctx, bktName1, object1, content) - - testCases := []struct { - testname string - bkt string - objpath string - size int64 - err error - }{{ - testname: "ExistObjectAttr", - bkt: bktName1, - objpath: object1, - size: int64(len(content)), - }, { - testname: "badObject", - bkt: bktName1, - objpath: "badobjectpath", - err: NewNoObjectError(bktName1, "badobjectpath", ""), - }, { - testname: "badBucket", - bkt: "non-existent-bucket", - err: NewNoBucketError("non-existent-bucket"), - }} - - for _, tt := range testCases { - t.Run(tt.testname, func(t *testing.T) { - objAttr, err := mockClient.AttrObject(ctx, tt.bkt, tt.objpath) - if (tt.err == nil || err == nil) && !errors.Is(err, tt.err) { - t.Fatalf("Expected error %v, got error %v", tt.err, err) - } else if (tt.err != nil && err != nil) && tt.err.Error() != err.Error() { - t.Fatalf("Expected error %v, got error %v", tt.err, err) - } - - if tt.err != nil { - return - } - - if objAttr.Bucket != tt.bkt { - t.Fatalf("Expected content %v, got content %v", tt.bkt, objAttr.Bucket) - } else if objAttr.Name != tt.objpath { - t.Fatalf("Expected content %v, got content %v", tt.objpath, objAttr.Name) - } else if objAttr.Size != tt.size { - t.Fatalf("Expected content %v, got content %v", tt.size, objAttr.Size) - } - }) - } -} - -func TestCopyObject(t *testing.T) { - ctx := context.Background() - mockClient := newClientMocker() - const ( - bktName1 = "test-bucket1" - bktName2 = "test-bucket2" - project1 = "test-project1" - object1 = "dir/object1" - ) - content := []byte("Hello World") - - mockClient.NewStorageBucket(ctx, bktName1, project1) - mockClient.NewStorageBucket(ctx, bktName2, project1) - mockClient.WriteObject(ctx, bktName1, object1, content) - - testCases := []struct { - testname string - srcBkt string - srcObjPath string - dstBkt string - dstObjPath string - err error - }{{ - testname: "copySameBucket", - srcBkt: bktName1, - srcObjPath: object1, - dstBkt: bktName1, - dstObjPath: "dir/object2", - err: nil, - }, { - testname: "copyAnotherBucket", - srcBkt: bktName1, - srcObjPath: object1, - dstBkt: bktName2, - dstObjPath: "dir/object2", - err: nil, - }, { - testname: "badSrcObject", - srcBkt: bktName1, - srcObjPath: "badobjectpath", - dstBkt: bktName2, - dstObjPath: "dir/object2", - err: NewNoObjectError(bktName1, "badobjectpath", ""), - }, { - testname: "badDstObject", - srcBkt: bktName1, - srcObjPath: object1, - dstBkt: bktName1, - dstObjPath: "badobjectpath/", - err: NewNoObjectError(bktName1, "", "badobjectpath/"), - }, { - testname: "badSrcBucket", - srcBkt: "non-existent-bucket", - dstBkt: bktName1, - err: NewNoBucketError("non-existent-bucket"), - }, { - testname: "badDstBucket", - srcBkt: bktName1, - dstBkt: "non-existent-bucket", - err: NewNoBucketError("non-existent-bucket"), - }} - - for _, tt := range testCases { - t.Run(tt.testname, func(t *testing.T) { - err := mockClient.CopyObject(ctx, 
tt.srcBkt, tt.srcObjPath, tt.dstBkt, tt.dstObjPath) - if (tt.err == nil || err == nil) && !errors.Is(err, tt.err) { - t.Fatalf("Expected error %v, got error %v", tt.err, err) - } else if (tt.err != nil && err != nil) && tt.err.Error() != err.Error() { - t.Fatalf("Expected error %v, got error %v", tt.err, err) - } - - if tt.err != nil { - return - } - - objContent, err := mockClient.ReadObject(ctx, tt.dstBkt, tt.dstObjPath) - if err != nil { - t.Fatalf("cannot read %v from bucket %v, got error %v", tt.dstObjPath, tt.dstBkt, err) - } - - if !bytes.Equal(objContent, content) { - t.Fatalf("Expected copied content %v, got content %v", content, objContent) - } - }) - } -} - -func TestReadObject(t *testing.T) { - ctx := context.Background() - mockClient := newClientMocker() - const ( - bktName1 = "test-bucket1" - project1 = "test-project1" - object1 = "object1" - badBkt = "non-existent-bucket" - ) - content := []byte("Hello World") - - mockClient.NewStorageBucket(ctx, bktName1, project1) - mockClient.WriteObject(ctx, bktName1, object1, content) - - testCases := []struct { - testname string - bkt string - objpath string - err error - }{{ - testname: "readObject", - bkt: bktName1, - objpath: path.Join(object1), - err: nil, - }, { - testname: "ReadObjectBadPath", - bkt: bktName1, - objpath: object1 + "/", - err: NewNoObjectError(bktName1, "", object1+"/"), - }, { - testname: "ReadObjectBadBucket", - bkt: badBkt, - err: NewNoBucketError(badBkt), - }} - - for _, tt := range testCases { - t.Run(tt.testname, func(t *testing.T) { - objContent, err := mockClient.ReadObject(ctx, tt.bkt, tt.objpath) - if (tt.err == nil || err == nil) && !errors.Is(err, tt.err) { - t.Fatalf("Expected error %v, got error %v", tt.err, err) - } else if (tt.err != nil && err != nil) && tt.err.Error() != err.Error() { - t.Fatalf("Expected error %v, got error %v", tt.err, err) - } - - if tt.err != nil { - return - } - - if !bytes.Equal(content, objContent) { - t.Fatalf("Expected content %v, got content %v", content, objContent) - } - }) - } -} - -func TestWriteObject(t *testing.T) { - ctx := context.Background() - mockClient := newClientMocker() - const ( - bktName1 = "test-bucket1" - project1 = "test-project1" - badBkt = "non-existent-bucket" - ) - - mockClient.NewStorageBucket(ctx, bktName1, project1) - - testCases := []struct { - testname string - bkt string - objpath string - content []byte - err error - }{{ - testname: "writeObject", - bkt: bktName1, - objpath: "testing/object", - content: []byte("Hello World"), - err: nil, - }, { - testname: "writeObjectBadPath", - bkt: bktName1, - objpath: "testing/", - err: NewNoObjectError(bktName1, "", "testing/"), - }, { - testname: "writeObjectBadBucket", - bkt: badBkt, - err: NewNoBucketError(badBkt), - }} - - for _, tt := range testCases { - t.Run(tt.testname, func(t *testing.T) { - n, err := mockClient.WriteObject(ctx, tt.bkt, tt.objpath, tt.content) - if (tt.err == nil || err == nil) && !errors.Is(err, tt.err) { - t.Fatalf("Expected error %v, got error %v", tt.err, err) - } else if (tt.err != nil && err != nil) && tt.err.Error() != err.Error() { - t.Fatalf("Expected error %v, got error %v", tt.err, err) - } - - if tt.err != nil { - return - } - - if n != len(tt.content) { - t.Fatalf("content has length %v, wrote only %v bytes", len(tt.content), n) - } - - if content, err := mockClient.ReadObject(ctx, tt.bkt, tt.objpath); err != nil { - t.Fatal("read object returned error", err) - } else if !bytes.Equal(content, tt.content) { - t.Fatalf("Expected content %v, got content %v", 
tt.content, content) - } - }) - } -} - -func TestDeleteObject(t *testing.T) { - ctx := context.Background() - mockClient := newClientMocker() - const ( - bktName1 = "test-bucket1" - project1 = "test-project1" - object1 = "dir/object1" - ) - - mockClient.NewStorageBucket(ctx, bktName1, project1) - mockClient.WriteObject(ctx, bktName1, object1, []byte("Hello World")) - - testCases := []struct { - testname string - bkt string - objpath string - err error - }{{ - testname: "DeleteObject", - bkt: bktName1, - objpath: object1, - err: nil, - }, { - testname: "DeleteNonExistentObject", - bkt: bktName1, - objpath: "non-existent-object", - err: nil, - }} - - for _, tt := range testCases { - t.Run(tt.testname, func(t *testing.T) { - err := mockClient.DeleteObject(ctx, tt.bkt, tt.objpath) - if (tt.err == nil || err == nil) && !errors.Is(err, tt.err) { - t.Fatalf("Expected error %v, got error %v", tt.err, err) - } else if (tt.err != nil && err != nil) && tt.err.Error() != err.Error() { - t.Fatalf("Expected error %v, got error %v", tt.err, err) - } - - if tt.err != nil { - return - } - - if mockClient.Exists(ctx, tt.bkt, tt.objpath) { - t.Fatalf("%v in %v should not exist (deleted)", tt.objpath, tt.bkt) - } - }) - } -} - -func TestDownload(t *testing.T) { - ctx := context.Background() - mockClient := newClientMocker() - bktName1 := "test-bucket1" - project1 := "test-project1" - object1 := "dir/object1" - file := "test/download" - defer os.Remove(file) - content := []byte("Hello World") - - mockClient.NewStorageBucket(ctx, bktName1, project1) - mockClient.WriteObject(ctx, bktName1, object1, content) - - testCases := []struct { - testname string - bkt string - objPath string - err error - }{{ - testname: "downloadObject", - bkt: bktName1, - objPath: object1, - err: nil, - }, { - testname: "badObject", - bkt: bktName1, - objPath: "badobjectpath", - err: NewNoObjectError(bktName1, "badobjectpath", ""), - }, { - testname: "badBucket", - bkt: "non-existent-bucket", - err: NewNoBucketError("non-existent-bucket"), - }} - - for _, tt := range testCases { - t.Run(tt.testname, func(t *testing.T) { - err := mockClient.Download(ctx, tt.bkt, tt.objPath, file) - if (tt.err == nil || err == nil) && !errors.Is(err, tt.err) { - t.Fatalf("Expected error %v, got error %v", tt.err, err) - } else if (tt.err != nil && err != nil) && tt.err.Error() != err.Error() { - t.Fatalf("Expected error %v, got error %v", tt.err, err) - } - - if tt.err != nil { - return - } - - fileContent, err := os.ReadFile(file) - if err != nil { - t.Fatalf("cannot read content %v, error %v", file, err) - } - if !bytes.Equal(fileContent, content) { - t.Fatalf("Expected copied content %v, got content %v", content, fileContent) - } - }) - } -} - -func TestUpload(t *testing.T) { - ctx := context.Background() - mockClient := newClientMocker() - bktName1 := "test-bucket1" - project1 := "test-project1" - object1 := "dir/object1" - file := "test/upload" - content, err := os.ReadFile(file) - if err != nil { - t.Fatalf("cannot read content %v, error %v", file, err) - } - - mockClient.NewStorageBucket(ctx, bktName1, project1) - - testCases := []struct { - testname string - bkt string - objPath string - err error - }{{ - testname: "uploadObject", - bkt: bktName1, - objPath: object1, - err: nil, - }, { - testname: "badObject", - bkt: bktName1, - objPath: "badobjectpath/", - err: NewNoObjectError(bktName1, "", "badobjectpath/"), - }, { - testname: "badBucket", - bkt: "non-existent-bucket", - err: NewNoBucketError("non-existent-bucket"), - }} - - for _, tt := range 
testCases { - t.Run(tt.testname, func(t *testing.T) { - err := mockClient.Upload(ctx, tt.bkt, tt.objPath, file) - if (tt.err == nil || err == nil) && !errors.Is(err, tt.err) { - t.Fatalf("Expected error %v, got error %v", tt.err, err) - } else if (tt.err != nil && err != nil) && tt.err.Error() != err.Error() { - t.Fatalf("Expected error %v, got error %v", tt.err, err) - } - - if tt.err != nil { - return - } - - objContent, err := mockClient.ReadObject(ctx, tt.bkt, tt.objPath) - if err != nil { - t.Fatalf("Cannot read content %v in bucket %v, error %v", tt.objPath, tt.bkt, err) - } - if !bytes.Equal(objContent, content) { - t.Fatalf("Expected copied content %v, got content %v", content, objContent) - } - }) - } -} diff --git a/test/gcs/mock/test/upload b/test/gcs/mock/test/upload deleted file mode 100644 index 557db03de9..0000000000 --- a/test/gcs/mock/test/upload +++ /dev/null @@ -1 +0,0 @@ -Hello World diff --git a/test/gke/addon.go b/test/gke/addon.go deleted file mode 100644 index f8f98d0254..0000000000 --- a/test/gke/addon.go +++ /dev/null @@ -1,55 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package gke - -import ( - "fmt" - "strings" - - container "google.golang.org/api/container/v1beta1" -) - -const ( - // Define all supported addons here - istio = "istio" - hpa = "horizontalpodautoscaling" - hlb = "httploadbalancing" - cloudRun = "cloudrun" -) - -// GetAddonsConfig gets AddonsConfig from a slice of addon names, contains the logic of -// converting string argument to typed AddonsConfig, for example `IstioConfig`. -// Currently supports Istio, HorizontalPodAutoscaling, HttpLoadBalancing and CloudRun. -func GetAddonsConfig(addons []string) *container.AddonsConfig { - ac := &container.AddonsConfig{} - for _, name := range addons { - switch strings.ToLower(name) { - case istio: - ac.IstioConfig = &container.IstioConfig{Disabled: false} - case hpa: - ac.HorizontalPodAutoscaling = &container.HorizontalPodAutoscaling{Disabled: false} - case hlb: - ac.HttpLoadBalancing = &container.HttpLoadBalancing{Disabled: false} - case cloudRun: - ac.CloudRunConfig = &container.CloudRunConfig{Disabled: false} - default: - panic(fmt.Sprintf("addon type %q not supported. Has to be one of: %q", name, []string{istio, hpa, hlb, cloudRun})) - } - } - - return ac -} diff --git a/test/gke/client.go b/test/gke/client.go deleted file mode 100644 index b1713860eb..0000000000 --- a/test/gke/client.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package gke - -import ( - "fmt" - - container "google.golang.org/api/container/v1beta1" - option "google.golang.org/api/option" - - "golang.org/x/net/context" -) - -// SDKOperations wraps GKE SDK related functions -type SDKOperations interface { - CreateCluster(project, region, zone string, req *container.CreateClusterRequest) error - CreateClusterAsync(project, region, zone string, req *container.CreateClusterRequest) (*container.Operation, error) - DeleteCluster(project, region, zone, clusterName string) error - DeleteClusterAsync(project, region, zone, clusterName string) (*container.Operation, error) - GetCluster(project, region, zone, clusterName string) (*container.Cluster, error) - GetOperation(project, region, zone, opName string) (*container.Operation, error) - ListClustersInProject(project string) ([]*container.Cluster, error) -} - -// sdkClient Implement SDKOperations -type sdkClient struct { - *container.Service -} - -// NewSDKClient returns an SDKClient that implements SDKOperations -func NewSDKClient(opts ...option.ClientOption) (SDKOperations, error) { - containerService, err := container.NewService(context.Background(), opts...) - if err != nil { - return nil, fmt.Errorf("failed to create container service: %w", err) - } - return &sdkClient{containerService}, nil -} - -// CreateCluster creates a new GKE cluster, and wait until it finishes or timeout or there is an error. -func (gsc *sdkClient) CreateCluster( - project, region, zone string, - rb *container.CreateClusterRequest, -) error { - op, err := gsc.CreateClusterAsync(project, region, zone, rb) - if err == nil { - err = Wait(gsc, project, region, zone, op.Name, creationTimeout) - } - return err -} - -// CreateClusterAsync creates a new GKE cluster asynchronously. -func (gsc *sdkClient) CreateClusterAsync( - project, region, zone string, - rb *container.CreateClusterRequest, -) (*container.Operation, error) { - location := GetClusterLocation(region, zone) - if zone != "" { - return gsc.Projects.Zones.Clusters.Create(project, location, rb).Context(context.Background()).Do() - } - parent := fmt.Sprintf("projects/%s/locations/%s", project, location) - return gsc.Projects.Locations.Clusters.Create(parent, rb).Context(context.Background()).Do() -} - -// DeleteCluster deletes the GKE cluster, and wait until it finishes or timeout or there is an error. -func (gsc *sdkClient) DeleteCluster(project, region, zone, clusterName string) error { - op, err := gsc.DeleteClusterAsync(project, region, zone, clusterName) - if err == nil { - err = Wait(gsc, project, region, zone, op.Name, deletionTimeout) - } - return err -} - -// DeleteClusterAsync deletes the GKE cluster asynchronously. -func (gsc *sdkClient) DeleteClusterAsync(project, region, zone, clusterName string) (*container.Operation, error) { - location := GetClusterLocation(region, zone) - if zone != "" { - return gsc.Projects.Zones.Clusters.Delete(project, location, clusterName).Context(context.Background()).Do() - } - clusterFullPath := fmt.Sprintf("projects/%s/locations/%s/clusters/%s", project, location, clusterName) - return gsc.Projects.Locations.Clusters.Delete(clusterFullPath).Context(context.Background()).Do() -} - -// GetCluster gets the GKE cluster with the given cluster name. 
-func (gsc *sdkClient) GetCluster(project, region, zone, clusterName string) (*container.Cluster, error) { - location := GetClusterLocation(region, zone) - if zone != "" { - return gsc.Projects.Zones.Clusters.Get(project, location, clusterName).Context(context.Background()).Do() - } - clusterFullPath := fmt.Sprintf("projects/%s/locations/%s/clusters/%s", project, location, clusterName) - return gsc.Projects.Locations.Clusters.Get(clusterFullPath).Context(context.Background()).Do() -} - -// ListClustersInProject lists all the GKE clusters created in the given project. -func (gsc *sdkClient) ListClustersInProject(project string) ([]*container.Cluster, error) { - var clusters []*container.Cluster - projectFullPath := fmt.Sprintf("projects/%s/locations/-", project) - resp, err := gsc.Projects.Locations.Clusters.List(projectFullPath).Do() - if err != nil { - return clusters, fmt.Errorf("failed to list clusters under project %s: %w", project, err) - } - return resp.Clusters, nil -} - -// GetOperation gets the operation ref with the given operation name. -func (gsc *sdkClient) GetOperation(project, region, zone, opName string) (*container.Operation, error) { - location := GetClusterLocation(region, zone) - if zone != "" { - return gsc.Service.Projects.Zones.Operations.Get(project, location, opName).Do() - } - opsFullPath := fmt.Sprintf("projects/%s/locations/%s/operations/%s", project, location, opName) - return gsc.Service.Projects.Locations.Operations.Get(opsFullPath).Do() -} diff --git a/test/gke/client_test.go b/test/gke/client_test.go deleted file mode 100644 index 88abbb68dc..0000000000 --- a/test/gke/client_test.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package gke - -import ( - "os" - "path/filepath" - "testing" - - "google.golang.org/api/option" -) - -const credEnvKey = "GOOGLE_APPLICATION_CREDENTIALS" - -// func NewSDKClient(opts ...option.ClientOption) (SDKOperations, error) { -func TestNewSDKClient(t *testing.T) { - pwd, _ := os.Getwd() - t.Setenv(credEnvKey, filepath.Join(pwd, "fake/credentials.json")) - - datas := []struct { - req option.ClientOption - }{{ - // No options. - nil, - }, { - // One option. - option.WithAPIKey("AIza..."), - }} - for _, data := range datas { - var client SDKOperations - var err error - if data.req == nil { - client, err = NewSDKClient() - } else { - client, err = NewSDKClient(data.req) - } - - if err != nil { - t.Errorf("Expected no error from request '%v', but got '%v'", data.req, err) - } - if client == nil { - t.Error("Expected a valid client, but got nil") - } - } -} diff --git a/test/gke/endpoint.go b/test/gke/endpoint.go deleted file mode 100644 index 586bf72b4f..0000000000 --- a/test/gke/endpoint.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package gke - -import ( - "fmt" - "regexp" -) - -const ( - testEnv = "test" - stagingEnv = "staging" - staging2Env = "staging2" - prodEnv = "prod" - - testEndpoint = "https://test-container.sandbox.googleapis.com/" - stagingEndpoint = "https://staging-container.sandbox.googleapis.com/" - staging2Endpoint = "https://staging2-container.sandbox.googleapis.com/" - prodEndpoint = "https://container.googleapis.com/" -) - -var urlRe = regexp.MustCompile(`https://.*/`) - -// ServiceEndpoint returns the container service endpoint for the given environment. -func ServiceEndpoint(environment string) (string, error) { - var endpoint string - switch env := environment; { - case env == testEnv: - endpoint = testEndpoint - case env == stagingEnv: - endpoint = stagingEndpoint - case env == staging2Env: - endpoint = staging2Endpoint - case env == prodEnv: - endpoint = prodEndpoint - case urlRe.MatchString(env): - endpoint = env - default: - return "", fmt.Errorf("the environment '%s' is invalid, must be one of 'test', 'staging', 'staging2', 'prod', or a custom https:// URL", environment) - } - return endpoint, nil -} diff --git a/test/gke/endpoint_test.go b/test/gke/endpoint_test.go deleted file mode 100644 index fa9e696970..0000000000 --- a/test/gke/endpoint_test.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package gke - -import "testing" - -func TestServiceEndpoint(t *testing.T) { - datas := []struct { - env string - want string - errorExpected bool - }{ - {"", "", true}, - {testEnv, testEndpoint, false}, - {stagingEnv, stagingEndpoint, false}, - {staging2Env, staging2Endpoint, false}, - {prodEnv, prodEndpoint, false}, - {"invalid_url", "", true}, - {"https://custom.container.googleapis.com/", "https://custom.container.googleapis.com/", false}, - } - for _, data := range datas { - got, err := ServiceEndpoint(data.env) - if got != data.want { - t.Errorf("Service endpoint for %q = %q, want: %q", - data.env, got, data.want) - } - if err != nil && !data.errorExpected { - t.Error("Error is not expected by got", err) - } - if err == nil && data.errorExpected { - t.Error("Expected one error but got nil") - } - } -} diff --git a/test/gke/fake/client.go b/test/gke/fake/client.go deleted file mode 100644 index c3af1f3c82..0000000000 --- a/test/gke/fake/client.go +++ /dev/null @@ -1,205 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - "errors" - "fmt" - "strconv" - "strings" - "sync" - "time" - - container "google.golang.org/api/container/v1beta1" - "knative.dev/pkg/test/gke" -) - -// Timeout for fake client. -// Need to be changed dynamically in the tests, so make them public. -var ( - CreationTimeout = 1000 * time.Millisecond - DeletionTimeout = 10 * time.Minute -) - -// GKESDKClient is a fake client for unit tests. -type GKESDKClient struct { - // map of parent: clusters slice - clusters map[string][]*container.Cluster - // map of operationID: operation - ops map[string]*container.Operation - - // An incremental number for new ops - opNumber int - // A lookup table for determining ops statuses - OpStatus map[string]string - - mutex sync.Mutex -} - -// NewGKESDKClient returns a new fake gkeSDKClient that can be used in unit tests. -func NewGKESDKClient() *GKESDKClient { - return &GKESDKClient{ - clusters: make(map[string][]*container.Cluster), - ops: make(map[string]*container.Operation), - OpStatus: make(map[string]string), - } -} - -// automatically registers new ops, and mark it "DONE" by default. Update -// fgsc.opStatus by fgsc.opStatus[string(fgsc.opNumber+1)]="PENDING" to make the -// next operation pending -func (fgsc *GKESDKClient) newOp() *container.Operation { - opName := strconv.Itoa(fgsc.opNumber) - op := &container.Operation{ - Name: opName, - Status: "DONE", - } - if status, ok := fgsc.OpStatus[opName]; ok { - op.Status = status - } - fgsc.opNumber++ - fgsc.ops[opName] = op - return op -} - -// CreateCluster creates a new cluster, and wait until it finishes or timeout or there is an error. -func (fgsc *GKESDKClient) CreateCluster( - project, region, zone string, - rb *container.CreateClusterRequest, -) error { - op, err := fgsc.CreateClusterAsync(project, region, zone, rb) - if err == nil { - err = gke.Wait(fgsc, project, region, zone, op.Name, CreationTimeout) - } - return err -} - -// CreateClusterAsync creates a new cluster asynchronously. -func (fgsc *GKESDKClient) CreateClusterAsync( - project, region, zone string, - rb *container.CreateClusterRequest, -) (*container.Operation, error) { - fgsc.mutex.Lock() - defer fgsc.mutex.Unlock() - location := gke.GetClusterLocation(region, zone) - parent := fmt.Sprintf("projects/%s/locations/%s", project, location) - name := rb.Cluster.Name - if cls, ok := fgsc.clusters[parent]; ok { - for _, cl := range cls { - if cl.Name == name { - return nil, errors.New("cluster already exist") - } - } - } else { - fgsc.clusters[parent] = make([]*container.Cluster, 0) - } - cluster := &container.Cluster{ - Name: name, - Location: location, - Status: "RUNNING", - AddonsConfig: rb.Cluster.AddonsConfig, - NodePools: rb.Cluster.NodePools, - } - if rb.Cluster.NodePools != nil { - cluster.NodePools = rb.Cluster.NodePools - } - if rb.Cluster.MasterAuth != nil { - cluster.MasterAuth = &container.MasterAuth{ - Username: rb.Cluster.MasterAuth.Username, - } - } - - fgsc.clusters[parent] = append(fgsc.clusters[parent], cluster) - return fgsc.newOp(), nil -} - -// DeleteCluster deletes the cluster, and wait until it finishes or timeout or there is an error. 
-func (fgsc *GKESDKClient) DeleteCluster( - project, region, zone, clusterName string, -) error { - op, err := fgsc.DeleteClusterAsync(project, region, zone, clusterName) - if err == nil { - err = gke.Wait(fgsc, project, region, zone, op.Name, DeletionTimeout) - } - return err -} - -// DeleteClusterAsync deletes the cluster asynchronously. -func (fgsc *GKESDKClient) DeleteClusterAsync( - project, region, zone, clusterName string, -) (*container.Operation, error) { - fgsc.mutex.Lock() - defer fgsc.mutex.Unlock() - location := gke.GetClusterLocation(region, zone) - parent := fmt.Sprintf("projects/%s/locations/%s", project, location) - found := -1 - if clusters, ok := fgsc.clusters[parent]; ok { - for i, cluster := range clusters { - if cluster.Name == clusterName { - found = i - } - } - } - if found == -1 { - return nil, fmt.Errorf("cluster %q not found for deletion", clusterName) - } - // Delete this cluster - fgsc.clusters[parent] = append(fgsc.clusters[parent][:found], fgsc.clusters[parent][found+1:]...) - return fgsc.newOp(), nil -} - -// GetCluster gets the cluster with the given settings. -func (fgsc *GKESDKClient) GetCluster(project, region, zone, cluster string) (*container.Cluster, error) { - fgsc.mutex.Lock() - defer fgsc.mutex.Unlock() - location := gke.GetClusterLocation(region, zone) - parent := fmt.Sprintf("projects/%s/locations/%s", project, location) - if cls, ok := fgsc.clusters[parent]; ok { - for _, cl := range cls { - if cl.Name == cluster { - return cl, nil - } - } - } - return nil, fmt.Errorf("cluster not found") -} - -// ListClustersInProject lists all the GKE clusters created in the given project. -func (fgsc *GKESDKClient) ListClustersInProject(project string) ([]*container.Cluster, error) { - fgsc.mutex.Lock() - defer fgsc.mutex.Unlock() - allClusters := make([]*container.Cluster, 0) - projectPath := fmt.Sprintf("projects/%s", project) - for location, cls := range fgsc.clusters { - // If the clusters are under this project - if strings.HasPrefix(location, projectPath) { - allClusters = append(allClusters, cls...) - } - } - return allClusters, nil -} - -// GetOperation gets the operation with the given settings. -func (fgsc *GKESDKClient) GetOperation(project, region, zone, opName string) (*container.Operation, error) { - fgsc.mutex.Lock() - op, ok := fgsc.ops[opName] - fgsc.mutex.Unlock() - if ok { - return op, nil - } - return nil, errors.New(opName + " operation not found") -} diff --git a/test/gke/fake/credentials.json b/test/gke/fake/credentials.json deleted file mode 100644 index 820c27c5a0..0000000000 --- a/test/gke/fake/credentials.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "client_id": "123456", - "client_secret": "123456", - "refresh_token": "123456", - "type": "authorized_user" -} diff --git a/test/gke/location.go b/test/gke/location.go deleted file mode 100644 index 36f99f3515..0000000000 --- a/test/gke/location.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package gke - -import ( - "fmt" - "strings" -) - -// GetClusterLocation returns the location used in GKE operations, given the region and zone. -func GetClusterLocation(region, zone string) string { - if zone != "" { - region = fmt.Sprintf("%s-%s", region, zone) - } - return region -} - -// RegionZoneFromLoc returns the region and the zone, given the location. -func RegionZoneFromLoc(location string) (string, string) { - parts := strings.Split(location, "-") - // zonal location is the form of us-central1-a, and this pattern is - // consistent in all available GCP locations so far, so we are looking for - // location with more than 2 "-" - if len(parts) > 2 { - zone := parts[len(parts)-1] - region := strings.TrimRight(location, "-"+zone) - return region, zone - } - return location, "" -} diff --git a/test/gke/location_test.go b/test/gke/location_test.go deleted file mode 100644 index e6b2230eea..0000000000 --- a/test/gke/location_test.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package gke - -import "testing" - -func TestGetClusterLocation(t *testing.T) { - datas := []struct { - region, zone string - want string - }{ - {"a", "b", "a-b"}, - {"a", "", "a"}, - } - for _, data := range datas { - if got := GetClusterLocation(data.region, data.zone); got != data.want { - t.Errorf("Cluster location with region %q and zone %q = %q, want: %q", - data.region, data.zone, got, data.want) - } - } -} - -func TestRegionZoneFromLoc(t *testing.T) { - datas := []struct { - loc string - wantRegion string - wantZone string - }{ - {"a-b-c", "a-b", "c"}, - {"a-b", "a-b", ""}, - {"a", "a", ""}, - {"", "", ""}, - } - for _, data := range datas { - gotRegion, gotZone := RegionZoneFromLoc(data.loc) - if gotRegion != data.wantRegion { - t.Errorf("Cluster region from location %q = %q, want: %q", - data.loc, gotRegion, data.wantRegion) - } - if gotZone != data.wantZone { - t.Errorf("Cluster zone from location %q = %q, want: %q", - data.loc, gotZone, data.wantZone) - } - } -} diff --git a/test/gke/request.go b/test/gke/request.go deleted file mode 100644 index 5ff2cc1f17..0000000000 --- a/test/gke/request.go +++ /dev/null @@ -1,170 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package gke - -import ( - "errors" - - container "google.golang.org/api/container/v1beta1" -) - -const defaultGKEVersion = "latest" - -// Request contains all settings collected for cluster creation -type Request struct { - // Project: name of the gcloud project for the cluster - Project string - - // GKEVersion: GKE version of the cluster, default to be latest if not provided - GKEVersion string - - // ReleaseChannel: GKE release channel. Only one of GKEVersion or ReleaseChannel can be - // specified at a time. - // https://cloud.google.com/kubernetes-engine/docs/concepts/release-channels - ReleaseChannel string - - // ClusterName: name of the cluster - ClusterName string - - // MinNodes: the minimum number of nodes of the cluster - MinNodes int64 - - // MaxNodes: the maximum number of nodes of the cluster - MaxNodes int64 - - // NodeType: node type of the cluster, e.g. e2-standard-4, e2-standard-8 - NodeType string - - // Region: region of the cluster, e.g. us-west1, us-central1 - Region string - - // Zone: default is none, must be provided together with region - Zone string - - // Addons: cluster addons to be added to cluster, such as istio - Addons []string - - // EnableWorkloadIdentity: whether to enable Workload Identity for this cluster - EnableWorkloadIdentity bool - - // ServiceAccount: service account that will be used on this cluster - ServiceAccount string -} - -// DeepCopy will make a deepcopy of the request struct. -func (r *Request) DeepCopy() *Request { - return &Request{ - Project: r.Project, - GKEVersion: r.GKEVersion, - ReleaseChannel: r.ReleaseChannel, - ClusterName: r.ClusterName, - MinNodes: r.MinNodes, - MaxNodes: r.MaxNodes, - NodeType: r.NodeType, - Region: r.Region, - Zone: r.Zone, - Addons: r.Addons, - EnableWorkloadIdentity: r.EnableWorkloadIdentity, - ServiceAccount: r.ServiceAccount, - } -} - -// NewCreateClusterRequest returns a new CreateClusterRequest that can be used in gcloud SDK. -func NewCreateClusterRequest(request *Request) (*container.CreateClusterRequest, error) { - if request.ClusterName == "" { - return nil, errors.New("cluster name cannot be empty") - } - if request.MinNodes <= 0 { - return nil, errors.New("min nodes must be larger than 1") - } - if request.MinNodes > request.MaxNodes { - return nil, errors.New("min nodes cannot be larger than max nodes") - } - if request.NodeType == "" { - return nil, errors.New("node type cannot be empty") - } - if request.EnableWorkloadIdentity && request.Project == "" { - return nil, errors.New("project cannot be empty if you want Workload Identity") - } - if request.GKEVersion != "" && request.ReleaseChannel != "" { - return nil, errors.New("can only specify one of GKE version or release channel (not both)") - } - - ccr := &container.CreateClusterRequest{ - Cluster: &container.Cluster{ - NodePools: []*container.NodePool{ - { - Name: "default-pool", - InitialNodeCount: request.MinNodes, - Autoscaling: &container.NodePoolAutoscaling{ - Enabled: true, - MinNodeCount: request.MinNodes, - MaxNodeCount: request.MaxNodes, - }, - Config: &container.NodeConfig{ - MachineType: request.NodeType, - // The set of Google API scopes to be made available on all - // of the node VMs under the "default" service account. - // If unspecified, no scopes are added, unless Cloud Logging or - // Cloud Monitoring are enabled, in which case their required - // scopes will be added. 
- // `https://www.googleapis.com/auth/devstorage.read_only` is required - // for communicating with **gcr.io**, and it's included in cloud-platform scope. - // TODO(chizhg): give more fine granular scope based on the actual needs. - OauthScopes: []string{container.CloudPlatformScope}, - }, - }, - }, - Name: request.ClusterName, - // Installing addons after cluster creation takes at least 5 - // minutes, so install addons as part of cluster creation, which - // doesn't seem to add much time on top of cluster creation - AddonsConfig: GetAddonsConfig(request.Addons), - // Equivalent to --enable-basic-auth, so that user:pass can be - // later on retrieved for setting up cluster roles. Use the - // default username from gcloud command, the password will be - // automatically generated by GKE SDK - MasterAuth: &container.MasterAuth{Username: "admin"}, - }, - } - if request.EnableWorkloadIdentity { - // Equivalent to --identity-namespace=[PROJECT_ID].svc.id.goog, then - // we can configure a Kubernetes service account to act as a Google - // service account. - ccr.Cluster.WorkloadIdentityConfig = &container.WorkloadIdentityConfig{ - IdentityNamespace: request.Project + ".svc.id.goog", - } - } - if request.ServiceAccount != "" { - // The Google Cloud Platform Service Account to be used by the node VMs. - // If a service account is specified, the cloud-platform and userinfo.email scopes are used. - // If no Service Account is specified, the project default service account is used. - ccr.Cluster.NodePools[0].Config.ServiceAccount = request.ServiceAccount - } - - // Manage the GKE cluster version. Only one of initial cluster version or release channel can be specified. - if request.ReleaseChannel != "" { - ccr.Cluster.ReleaseChannel = &container.ReleaseChannel{Channel: request.ReleaseChannel} - } else if request.GKEVersion != "" { - ccr.Cluster.InitialClusterVersion = request.GKEVersion - } else { - // The default cluster version is not latest, has to explicitly - // set it as "latest" - ccr.Cluster.InitialClusterVersion = defaultGKEVersion - } - return ccr, nil -} diff --git a/test/gke/request_test.go b/test/gke/request_test.go deleted file mode 100644 index 8b8eb8ad4f..0000000000 --- a/test/gke/request_test.go +++ /dev/null @@ -1,187 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package gke - -import "testing" - -func TestNewCreateClusterRequest(t *testing.T) { - datas := []struct { - req *Request - errorExpected bool - }{ - { - req: &Request{ - Project: "project-a", - ClusterName: "name-a", - MinNodes: 1, - MaxNodes: 1, - NodeType: "n1-standard-4", - Addons: []string{"Istio"}, - }, - errorExpected: false, - }, { - req: &Request{ - Project: "project-b", - ClusterName: "name-b", - MinNodes: 10, - MaxNodes: 10, - NodeType: "n1-standard-8", - Addons: []string{"HorizontalPodAutoscaling", "HttpLoadBalancing", "CloudRun"}, - }, - errorExpected: false, - }, - { - req: &Request{ - Project: "project-b", - ClusterName: "name-b", - MinNodes: 10, - MaxNodes: 10, - NodeType: "n1-standard-8", - Addons: []string{"HorizontalPodAutoscaling", "HttpLoadBalancing", "CloudRun"}, - ReleaseChannel: "rapid", - }, - errorExpected: false, - }, - { - req: &Request{ - Project: "project-b", - ClusterName: "name-b", - GKEVersion: "1-2-3", - MinNodes: 10, - MaxNodes: 10, - NodeType: "n1-standard-8", - Addons: []string{"HorizontalPodAutoscaling", "HttpLoadBalancing", "CloudRun"}, - ReleaseChannel: "rapid", - }, - errorExpected: true, - }, - { - req: &Request{ - Project: "project-c", - GKEVersion: "1-2-3", - MinNodes: 1, - MaxNodes: 1, - NodeType: "n1-standard-4", - }, - errorExpected: true, - }, { - req: &Request{ - Project: "project-d", - GKEVersion: "1-2-3", - ClusterName: "name-d", - MinNodes: 0, - MaxNodes: 1, - NodeType: "n1-standard-4", - }, - errorExpected: true, - }, { - req: &Request{ - Project: "project-e", - GKEVersion: "1-2-3", - ClusterName: "name-e", - MinNodes: 10, - MaxNodes: 1, - NodeType: "n1-standard-4", - }, - errorExpected: true, - }, { - req: &Request{ - Project: "project-f", - GKEVersion: "1-2-3", - ClusterName: "name-f", - MinNodes: 1, - MaxNodes: 1, - }, - errorExpected: true, - }, { - req: &Request{ - Project: "project-d", - GKEVersion: "1-2-3", - ClusterName: "name-d", - MinNodes: 0, - MaxNodes: 1, - NodeType: "n1-standard-4", - }, - errorExpected: true, - }, { - req: &Request{ - Project: "project-e", - GKEVersion: "1-2-3", - ClusterName: "name-e", - MinNodes: 10, - MaxNodes: 1, - NodeType: "n1-standard-4", - }, - errorExpected: true, - }, { - req: &Request{ - Project: "project-f", - GKEVersion: "1-2-3", - ClusterName: "name-f", - MinNodes: 1, - MaxNodes: 1, - }, - errorExpected: true, - }, { - req: &Request{ - Project: "project-g", - GKEVersion: "1-2-3", - ClusterName: "name-g", - MinNodes: 1, - MaxNodes: 1, - NodeType: "n1-standard-4", - EnableWorkloadIdentity: true, - }, - errorExpected: false, - }, { - req: &Request{ - GKEVersion: "1-2-3", - ClusterName: "name-h", - MinNodes: 3, - MaxNodes: 3, - NodeType: "n1-standard-4", - EnableWorkloadIdentity: true, - }, - errorExpected: true, - }, { - req: &Request{ - Project: "project-i", - GKEVersion: "1-2-3", - ClusterName: "name-i", - MinNodes: 3, - MaxNodes: 3, - NodeType: "n1-standard-4", - ServiceAccount: "sa-i", - }, - errorExpected: false, - }} - for _, data := range datas { - createReq, err := NewCreateClusterRequest(data.req) - if data.errorExpected { - if err == nil { - t.Errorf("Expected error from request '%v', but got nil", data.req) - } - } else { - if err != nil { - t.Errorf("Expected no error from request '%v', but got '%v'", data.req, err) - } - if createReq == nil { - t.Error("Expected a valid request, but got nil") - } - } - } -} diff --git a/test/gke/wait.go b/test/gke/wait.go deleted file mode 100644 index b6226c31a3..0000000000 --- a/test/gke/wait.go +++ /dev/null @@ -1,77 +0,0 @@ -/* -Copyright 
2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package gke - -import ( - "errors" - "fmt" - "time" - - container "google.golang.org/api/container/v1beta1" -) - -// These are arbitrary numbers determined based on past experience -var ( - creationTimeout = 20 * time.Minute - deletionTimeout = 10 * time.Minute -) - -const ( - pendingStatus = "PENDING" - runningStatus = "RUNNING" - doneStatus = "DONE" -) - -// Wait depends on unique opName(operation ID created by cloud), and waits until -// it's done -func Wait(gsc SDKOperations, project, region, zone, opName string, wait time.Duration) error { - var op *container.Operation - var err error - - timeout := time.After(wait) - ticker := time.NewTicker(500 * time.Millisecond) - defer ticker.Stop() - for { - select { - // Got a timeout! fail with a timeout error - case <-timeout: - return errors.New("timed out waiting") - case <-ticker.C: - // Retry 3 times in case of weird network error, or rate limiting - for r, w := 0, 50*time.Microsecond; r < 3; r, w = r+1, w*2 { - op, err = gsc.GetOperation(project, region, zone, opName) - if err == nil { - if op.Status == doneStatus { - return nil - } else if op.Status == pendingStatus || op.Status == runningStatus { - // Valid operation, no need to retry - break - } else { - // Have seen intermittent error state and fixed itself, - // let it retry to avoid too much flakiness - err = fmt.Errorf("unexpected operation status: %q", op.Status) - } - } - time.Sleep(w) - } - // If err still persist after retries, exit - if err != nil { - return err - } - } - } -} diff --git a/test/interactive/command.go b/test/interactive/command.go deleted file mode 100644 index edea81ab4f..0000000000 --- a/test/interactive/command.go +++ /dev/null @@ -1,81 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Helper functions for running interactive CLI sessions from Go -package interactive - -import ( - "io" - "log" - "os" - "os/exec" - "strings" -) - -// could be public so extensions of this can test themselves -var defaultRun func(*exec.Cmd) error - -func init() { - defaultRun = func(c *exec.Cmd) error { return c.Run() } -} - -// Command represents running an executable, to make it easy to write interactive "shell scripts" in Go. -// This routes standard in, out, and error from and to the calling program, intended usually to be a login shell run by a person. 
-type Command struct { - Name string - Args []string - // If LogFile is set, anything send to stdout and stderr is tee'd to the file named in LogFile when .Run() is called. - LogFile string - run func(*exec.Cmd) error -} - -// NewCommand creates an Command, breaking out .Name and .Args for you -func NewCommand(cmdAndArgs ...string) Command { - if len(cmdAndArgs) < 1 { - log.Fatal("NewCommand must have a least the command given") - } - return Command{cmdAndArgs[0], cmdAndArgs[1:], "", defaultRun} -} - -// Run executes the command with exec.Command(), sets standard in/out/err, and returns the result of exec.Cmd.Run() -// Will tee stdout&err to .LogFile if set -func (c Command) Run() error { - cmd := exec.Command(c.Name, c.Args...) - cmd.Stdin = os.Stdin - if len(c.LogFile) > 0 { - f, err := os.OpenFile(c.LogFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0666) - if err != nil { - log.Fatal(err) - } - cmd.Stdout = io.MultiWriter(os.Stdout, f) - cmd.Stderr = io.MultiWriter(os.Stderr, f) - } else { - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - } - return c.run(cmd) -} - -// String conforms to Stringer -func (c Command) String() string { - s := c.Name + " " + strings.Join(c.Args, " ") - return "Command to run: " + s -} - -// AddArgs appends arguments to the current list of args -func (c *Command) AddArgs(args ...string) { - c.Args = append(c.Args, args...) -} diff --git a/test/interactive/command_test.go b/test/interactive/command_test.go deleted file mode 100644 index 9ff3102172..0000000000 --- a/test/interactive/command_test.go +++ /dev/null @@ -1,67 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package interactive - -import ( - "os/exec" - "testing" - - "github.com/davecgh/go-spew/spew" -) - -var ( - mySpew *spew.ConfigState - arbitraryArgs []string -) - -func init() { - mySpew = spew.NewDefaultConfig() - mySpew.DisableMethods = true - arbitraryArgs = []string{"docker", "run", "something"} -} - -func argsValidator(t *testing.T, args []string) func(c *exec.Cmd) error { - return func(c *exec.Cmd) error { - for i, arg := range arbitraryArgs { - if c.Args[i] != arg { - t.Errorf("c.Args[%d] not correct", i) - } - } - t.Log("Validated c.Args") - if t.Failed() { - mySpew.Sdump(*c) - } - return nil - } -} - -func TestNewCommand(t *testing.T) { - cmd := NewCommand(arbitraryArgs...) - cmd.run = argsValidator(t, arbitraryArgs) - cmd.Run() -} - -func TestAddArgs(t *testing.T) { - cmd := NewCommand(arbitraryArgs[0]) - cmd.AddArgs(arbitraryArgs[1:]...) - cmd.run = argsValidator(t, arbitraryArgs) - cmd.Run() -} - -func TestCommandStringer(t *testing.T) { - t.Log(NewCommand("true")) -} diff --git a/test/interactive/docker.go b/test/interactive/docker.go deleted file mode 100644 index b0ef91ebe0..0000000000 --- a/test/interactive/docker.go +++ /dev/null @@ -1,115 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Helper functions for running interactive CLI sessions from Go -package interactive - -import ( - "fmt" - "log" - "os" - "path" - "strings" -) - -var defaultDockerCommands []string - -func init() { - defaultDockerCommands = []string{"docker", "run", "-it", "--rm", "--entrypoint", "bash"} -} - -// Env represents a collection of environment variables and their values -type Env map[string]string - -// Docker is mostly an Command preloaded with arguments which setup Docker for running an image interactively. -type Docker struct { - Command -} - -// PromoteFromEnv pulls the named environment variables from the environment and puts them in the Env. -// It does not stop on error and returns an error listing all the failed values -func (e Env) PromoteFromEnv(envVars ...string) error { - var err error - for _, env := range envVars { - v := os.Getenv(env) - if v == "" { - err = fmt.Errorf("environment variable %q is not set; %w", env, err) - } else { - e[env] = v - } - } - return err -} - -// NewDocker creates a Docker instance with default Docker command arguments for running interactively. -func NewDocker() Docker { - return Docker{NewCommand(defaultDockerCommands...)} -} - -// AddEnv adds arguments so all the environment variables present in e become part of the docker run's environment -func (d *Docker) AddEnv(e Env) { - for k, v := range e { - d.AddArgs("-e", fmt.Sprintf("%s=%s", k, v)) - } -} - -// AddMount add arguments for the --mount command -func (d *Docker) AddMount(typeStr, source, target string, optAdditionalArgs ...string) { - addl := "" - if len(optAdditionalArgs) != 0 { - addl = "," + strings.Join(optAdditionalArgs, ",") - } - d.AddArgs("--mount", fmt.Sprintf("type=%s,source=%s,target=%s%s", typeStr, source, target, addl)) -} - -// AddRWOverlay mounts a directory into the image at the desired location, but with an overlay -// -// so internal changes do not modify the external directory. -// -// externalDirectory probably needs to be an absolute path -// Returns a function to clean up the mount (but does not delete the directory). -// Uses sudo and probably only works on Linux -func (d *Docker) AddRWOverlay(externalDirectory, internalDirectory string) func() { - tmpDir, err := os.MkdirTemp("", "overlay") - if err != nil { - log.Fatal(err) - } - subDirs := []string{"upper", "work", "overlay"} - for _, d := range subDirs { - err = os.Mkdir(path.Join(tmpDir, d), 0777) - if err != nil { - log.Fatal(err) - } - } - overlayDir := path.Join(tmpDir, "overlay") - // The options for overlay mount are confusing - // You need empty directories for upper and work (and overlay, though you can mount over directories that have files in them if you *want* to...) 
- mount := NewCommand("sudo", "mount", "-t", "overlay", "-o", - fmt.Sprintf("lowerdir=%s,upperdir=%s/upper,workdir=%s/work", externalDirectory, tmpDir, tmpDir), - "none", overlayDir) - // Print command to run so user knows why it is asking for sudo password (if it does) - log.Println(mount) - if err = mount.Run(); err != nil { - log.Fatal("Unable to create overlay mount, so giving up: ", err) - } - d.AddMount("bind", overlayDir, internalDirectory) - return func() { - // Print command to run so user knows why it is asking for sudo password (if it does) - umount := NewCommand("sudo", "umount", overlayDir) - log.Println(umount) - umount.Run() - } -} diff --git a/test/interactive/docker_test.go b/test/interactive/docker_test.go deleted file mode 100644 index b43f4413fc..0000000000 --- a/test/interactive/docker_test.go +++ /dev/null @@ -1,81 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package interactive - -import ( - "os" - "os/exec" - "testing" -) - -func TestEnv(t *testing.T) { - e := make(Env) - err := e.PromoteFromEnv("PWD") - if err != nil { - t.Error(err) - t.Logf("The Environ is:\n%+v\n", os.Environ()) - } - epwd, exists := e["PWD"] - if !exists || epwd != os.Getenv("PWD") { - t.Errorf(`$PWD promotion did not occur correctly: Env='%v'; os.Getenv("PWD")=%s`, e, os.Getenv("PWD")) - } - badName := "GEEZ_I_REALLY_H0PE_TH1S_DOES_NOT_EXIST" - // and just in case: - for { - _, exists := os.LookupEnv(badName) - if !exists { - break - } - badName += "z" - } - err = e.PromoteFromEnv(badName) - if err == nil { - t.Error("Should have received error promoting non-existent variable") - } -} - -func TestAddEnv(t *testing.T) { - d := NewDocker() - e := make(Env) - l := len(defaultDockerCommands) - e["env"] = "var" - d.AddEnv(e) - d.run = func(c *exec.Cmd) error { - if c.Args[l] != "-e" || c.Args[l+1] != "env=var" { - t.Error("Env var wasn't added correctly") - t.Log(mySpew.Sdump(c)) - } - t.Log("Test Over") - return nil - } - d.Run() -} - -func TestAddMount(t *testing.T) { - d := NewDocker() - l := len(defaultDockerCommands) - d.AddMount("bind", "mysource", "mytarget", "other1=banana1", "other2=banana2") - d.run = func(c *exec.Cmd) error { - if c.Args[l] != "--mount" || c.Args[l+1] != "type=bind,source=mysource,target=mytarget,other1=banana1,other2=banana2" { - t.Error("Mount wasn't added correctly") - t.Log(mySpew.Sdump(c)) - } - t.Log("Test Over") - return nil - } - d.Run() -} diff --git a/test/monitoring/doc.go b/test/monitoring/doc.go deleted file mode 100644 index 2d5a313437..0000000000 --- a/test/monitoring/doc.go +++ /dev/null @@ -1,32 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Package monitoring provides common methods for all the monitoring components used in the tests - -This package exposes following methods: - - CheckPortAvailability(port int) error - Checks if the given port is available - GetPods(kubeClientset kubernetes.Interface, app string) (*v1.PodList, error) - Gets the list of pods that satisfy the label selector app= - Cleanup(pid int) error - Kill the current port forwarding process running in the background - PortForward(logf logging.FormatLogger, podList *v1.PodList, localPort, remotePort int) (int, error) - Create a background process that will port forward the first pod from the local to remote port - It returns the process id for the background process created. -*/ -package monitoring diff --git a/test/monitoring/monitoring.go b/test/monitoring/monitoring.go deleted file mode 100644 index 54d1fe20d3..0000000000 --- a/test/monitoring/monitoring.go +++ /dev/null @@ -1,84 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package monitoring - -import ( - "context" - "fmt" - "net" - "os" - "os/exec" - "time" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "knative.dev/pkg/test/logging" -) - -// CheckPortAvailability checks to see if the port is available on the machine. -func CheckPortAvailability(port int) error { - server, err := net.Listen("tcp", fmt.Sprint(":", port)) - if err != nil { - // Port is likely taken - return err - } - return server.Close() -} - -// GetPods retrieves the current existing podlist for the app in monitoring namespace -// This uses app= as labelselector for selecting pods -func GetPods(ctx context.Context, kubeClientset kubernetes.Interface, app, namespace string) (*v1.PodList, error) { - pods, err := kubeClientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: "app=" + app}) - if err == nil && len(pods.Items) == 0 { - err = fmt.Errorf("pod %s not found on the cluster. 
Ensure monitoring is switched on for your Knative Setup", app) - } - return pods, err -} - -// Cleanup will clean the background process used for port forwarding -func Cleanup(pid int) error { - ps := os.Process{Pid: pid} - if err := ps.Kill(); err != nil { - return err - } - - errCh := make(chan error) - go func() { - _, err := ps.Wait() - errCh <- err - }() - - select { - case err := <-errCh: - return err - case <-time.After(30 * time.Second): - return fmt.Errorf("timed out waiting for process %d to exit", pid) - } -} - -// PortForward sets up local port forward to the pod specified by the "app" label in the given namespace -func PortForward(logf logging.FormatLogger, podList *v1.PodList, localPort, remotePort int, namespace string) (int, error) { - podName := podList.Items[0].Name - cmd := exec.Command("kubectl", "port-forward", podName, fmt.Sprintf("%d:%d", localPort, remotePort), "-n", namespace) - if err := cmd.Start(); err != nil { - return 0, fmt.Errorf("failed to port forward: %w", err) - } - - logf("Running %s port-forward in background, pid = %d", podName, cmd.Process.Pid) - return cmd.Process.Pid, nil -} diff --git a/test/prometheus/prometheus.go b/test/prometheus/prometheus.go deleted file mode 100644 index 5d7ccf0bd3..0000000000 --- a/test/prometheus/prometheus.go +++ /dev/null @@ -1,135 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package prometheus - -import ( - "context" - "errors" - "fmt" - "sync" - "time" - - "github.com/prometheus/client_golang/api" - v1 "github.com/prometheus/client_golang/api/prometheus/v1" - "github.com/prometheus/common/model" - "k8s.io/client-go/kubernetes" - "knative.dev/pkg/test/logging" - "knative.dev/pkg/test/monitoring" -) - -const ( - prometheusPort = 9090 - appLabel = "prometheus" -) - -var ( - // sync.Once variable to ensure we execute zipkin setup only once. - setupOnce sync.Once - - // sync.Once variable to ensure we execute zipkin cleanup only if zipkin is setup and it is executed only once. - teardownOnce sync.Once -) - -// PromProxy defines a proxy to the prometheus server -type PromProxy struct { - Namespace string - processID int -} - -// Setup performs a port forwarding for app prometheus-test in given namespace -func (p *PromProxy) Setup(ctx context.Context, kubeClientset kubernetes.Interface, logf logging.FormatLogger) { - setupOnce.Do(func() { - if err := monitoring.CheckPortAvailability(prometheusPort); err != nil { - logf("Prometheus port not available: %v", err) - return - } - - promPods, err := monitoring.GetPods(ctx, kubeClientset, appLabel, p.Namespace) - if err != nil { - logf("Error retrieving prometheus pod details: %v", err) - return - } - - p.processID, err = monitoring.PortForward(logf, promPods, prometheusPort, prometheusPort, p.Namespace) - if err != nil { - logf("Error starting kubectl port-forward command: %v", err) - return - } - }) -} - -// Teardown will kill the port forwarding process if running. 
-func (p *PromProxy) Teardown(logf logging.FormatLogger) { - teardownOnce.Do(func() { - if err := monitoring.Cleanup(p.processID); err != nil { - logf("Encountered error killing port-forward process: %v", err) - return - } - }) -} - -// PromAPI gets a handle to the prometheus API -func PromAPI() (v1.API, error) { - client, err := api.NewClient(api.Config{Address: fmt.Sprintf("http://localhost:%d", prometheusPort)}) - if err != nil { - return nil, err - } - return v1.NewAPI(client), nil -} - -// AllowPrometheusSync sleeps for sometime to allow prometheus time to scrape the metrics. -func AllowPrometheusSync(logf logging.FormatLogger) { - logf("Sleeping to allow prometheus to record metrics...") - time.Sleep(30 * time.Second) -} - -// RunQuery runs a prometheus query and returns the metric value -func RunQuery(ctx context.Context, logf logging.FormatLogger, promAPI v1.API, query string) (float64, error) { - logf("Running prometheus query: %s", query) - - value, _, err := promAPI.Query(ctx, query, time.Now()) - if err != nil { - return 0, err - } - - return VectorValue(value) -} - -// RunQueryRange runs a prometheus query over the given range -func RunQueryRange(ctx context.Context, logf logging.FormatLogger, promAPI v1.API, query string, r v1.Range) (float64, error) { - logf("Running prometheus query: %s", query) - - value, _, err := promAPI.QueryRange(ctx, query, r) - if err != nil { - return 0, err - } - - return VectorValue(value) -} - -// VectorValue gets the vector value from the value type -func VectorValue(val model.Value) (float64, error) { - if val.Type() != model.ValVector { - return 0, fmt.Errorf("value type is %s. Expected: Valvector", val.String()) - } - value := val.(model.Vector) - if len(value) == 0 { - return 0, errors.New("query returned no results") - } - - return float64(value[0].Value), nil -} diff --git a/test/prometheus/prometheus_test.go b/test/prometheus/prometheus_test.go deleted file mode 100644 index 9822ec1be5..0000000000 --- a/test/prometheus/prometheus_test.go +++ /dev/null @@ -1,77 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package prometheus_test - -import ( - "context" - "testing" - "time" - - v1 "github.com/prometheus/client_golang/api/prometheus/v1" - "github.com/prometheus/common/model" - "knative.dev/pkg/test/prometheus" -) - -const ( - expected = 1.0 - query = "test" - duration = 10 * time.Second -) - -type testPromAPI struct { - v1.API -} - -// Query performs a query on the prom api -func (*testPromAPI) Query(c context.Context, query string, ts time.Time, opts ...v1.Option) (model.Value, v1.Warnings, error) { - - s := model.Sample{Value: expected} - var v []*model.Sample - v = append(v, &s) - - return model.Vector(v), nil, nil -} - -// QueryRange performs a query for the given range. 
-func (*testPromAPI) QueryRange(ctx context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) { - s := model.Sample{Value: expected} - var v []*model.Sample - v = append(v, &s) - - return model.Vector(v), nil, nil -} - -func TestRunQuery(t *testing.T) { - r, err := prometheus.RunQuery(context.Background(), t.Logf, &testPromAPI{}, query) - if err != nil { - t.Fatal("Error running query:", err) - } - if r != expected { - t.Fatalf("Want: %f Got: %f", expected, r) - } -} - -func TestRunQueryRange(t *testing.T) { - r := v1.Range{Start: time.Now(), End: time.Now().Add(duration)} - val, err := prometheus.RunQueryRange(context.Background(), t.Logf, &testPromAPI{}, query, r) - if err != nil { - t.Fatal("Error running query:", err) - } - if val != expected { - t.Fatalf("Want: %f Got: %f", expected, val) - } -} diff --git a/test/prow/env.go b/test/prow/env.go deleted file mode 100644 index a36b7a1d66..0000000000 --- a/test/prow/env.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// env.go provides a central point to read all environment variables defined by Prow. - -package prow - -import ( - "errors" - "fmt" - - "github.com/kelseyhightower/envconfig" -) - -// EnvConfig consists of all the environment variables that can be set in a Prow job, -// check https://github.com/kubernetes/test-infra/blob/master/prow/jobs.md#job-environment-variables -// for more information. -type EnvConfig struct { - CI bool - Artifacts string - JobName string `split_words:"true"` - JobType string `split_words:"true"` - JobSpec string `split_words:"true"` - BuildID string `envconfig:"BUILD_ID"` - ProwJobID string `envconfig:"PROW_JOB_ID"` - RepoOwner string `split_words:"true"` - RepoName string `split_words:"true"` - PullBaseRef string `split_words:"true"` - PullBaseSha string `split_words:"true"` - PullRefs string `split_words:"true"` - PullNumber uint `split_words:"true"` - PullPullSha string `split_words:"true"` -} - -// GetEnvConfig returns values of all the environment variables that can be possibly set in a Prow job. -func GetEnvConfig() (*EnvConfig, error) { - var ec EnvConfig - if err := envconfig.Process("", &ec); err != nil { - return nil, fmt.Errorf("failed getting environment variables for Prow: %w", err) - } - - if !ec.CI { - return nil, errors.New("this function is not expected to be called from a non-CI environment") - } - return &ec, nil -} diff --git a/test/prow/prow.go b/test/prow/prow.go index c5304ce27e..341a3b0871 100644 --- a/test/prow/prow.go +++ b/test/prow/prow.go @@ -19,326 +19,24 @@ limitations under the License. 
package prow -import ( - "bufio" - "context" - "encoding/json" - "log" - "os" - "path" - "sort" - "strconv" - "strings" - - "knative.dev/pkg/test/gcs" -) +import "knative.dev/pkg/test/ci" const ( - // BucketName is the gcs bucket for all knative builds - BucketName = "knative-prow" - // Latest is the filename storing latest build number - Latest = "latest-build.txt" - // BuildLog is the filename for build log - BuildLog = "build-log.txt" - // StartedJSON is the json file containing build started info - StartedJSON = "started.json" - // FinishedJSON is the json file containing build finished info - FinishedJSON = "finished.json" // ArtifactsDir is the dir containing artifacts - ArtifactsDir = "artifacts" - - // PresubmitJob means it runs on unmerged PRs. - PresubmitJob = "presubmit" - // PostsubmitJob means it runs on each new commit. - PostsubmitJob = "postsubmit" - // PeriodicJob means it runs on a time-basis, unrelated to git changes. - PeriodicJob = "periodic" - // BatchJob tests multiple unmerged PRs at the same time. - BatchJob = "batch" + // + // Deprecated: use knative.dev/pkg/test/ci.ArtifactsDir + ArtifactsDir = ci.ArtifactsDir ) -// defined here so that it can be mocked for unit testing -var logFatalf = log.Fatalf -var ctx = context.Background() -var client *gcs.GCSClient - -// Job struct represents a job directory in gcs. -// gcs job StoragePath will be derived from Type if it's defined, -type Job struct { - Name string - Type string - Bucket string // optional - Org string // optional - Repo string // optional - StoragePath string // optional - PullID int // only for Presubmit jobs - Builds []Build // optional -} - -// Build points to a build stored under a particular gcs path. -type Build struct { - JobName string - StoragePath string - BuildID int - Bucket string // optional - StartTime *int64 - FinishTime *int64 -} - -// Started holds the started.json values of the build. -type Started struct { - Timestamp int64 `json:"timestamp"` // epoch seconds - RepoVersion string `json:"repo-version"` - Node string `json:"node"` - Pull string `json:"pull"` - Repos map[string]string `json:"repos"` // {repo: branch_or_pull} map -} - -// Finished holds the finished.json values of the build -type Finished struct { - // Timestamp is epoch seconds - Timestamp int64 `json:"timestamp"` - Passed bool `json:"passed"` - JobVersion string `json:"job-version"` - Metadata Metadata `json:"metadata"` -} - -// Metadata contains metadata in finished.json -type Metadata map[string]interface{} - -// IsCI returns whether the current environment is a CI environment. -func IsCI() bool { - return strings.EqualFold(os.Getenv("CI"), "true") -} - -/* Local logics */ - -// GetLocalArtifactsDir gets the artifacts directory where prow looks for artifacts. -// By default, it will look at the env var ARTIFACTS. -func GetLocalArtifactsDir() string { - dir := os.Getenv("ARTIFACTS") - if dir == "" { - log.Printf("Env variable ARTIFACTS not set. 
Using %s instead.", ArtifactsDir) - dir = ArtifactsDir - } - return dir -} - -/* GCS related logics */ - -// Initialize wraps gcs authentication, have to be invoked before any other functions -func Initialize(serviceAccount string) error { - var err error - client, err = gcs.NewClient(ctx, serviceAccount) - return err -} - -// NewJob creates new job struct -// pullID is only saved by Presubmit job for determining StoragePath -func NewJob(jobName, jobType, orgName, repoName string, pullID int) *Job { - job := Job{ - Name: jobName, - Type: jobType, - Bucket: BucketName, - Org: orgName, - Repo: repoName, - } - - switch jobType { - case PeriodicJob, PostsubmitJob: - job.StoragePath = path.Join("logs", jobName) - case PresubmitJob: - job.PullID = pullID - job.StoragePath = path.Join("pr-logs", "pull", orgName+"_"+repoName, strconv.Itoa(pullID), jobName) - case BatchJob: - job.StoragePath = path.Join("pr-logs", "pull", "batch", jobName) - default: - logFatalf("unknown job spec type: %v", jobType) - } - return &job -} - -// PathExists checks if the storage path of a job exists in gcs or not -func (j *Job) PathExists() bool { - return client.Exists(ctx, BucketName, j.StoragePath) -} - -// GetLatestBuildNumber gets the latest build number for job -func (j *Job) GetLatestBuildNumber() (int, error) { - logFilePath := path.Join(j.StoragePath, Latest) - contents, err := client.ReadObject(ctx, BucketName, logFilePath) - if err != nil { - return 0, err - } - latestBuild, err := strconv.Atoi(strings.TrimSuffix(string(contents), "\n")) - if err != nil { - return 0, err - } - return latestBuild, nil -} - -// NewBuild gets build struct based on job info -// No gcs operation is performed by this function -func (j *Job) NewBuild(buildID int) *Build { - build := Build{ - Bucket: BucketName, - JobName: j.Name, - StoragePath: path.Join(j.StoragePath, strconv.Itoa(buildID)), - BuildID: buildID, - } - - if startTime, err := build.GetStartTime(); err == nil { - build.StartTime = &startTime - } - if finishTime, err := build.GetFinishTime(); err == nil { - build.FinishTime = &finishTime - } - return &build -} - -// GetFinishedBuilds gets all builds that have finished, -// by looking at existence of "finished.json" file -func (j *Job) GetFinishedBuilds() []Build { - var finishedBuilds []Build - builds := j.GetBuilds() - for _, build := range builds { - if build.IsFinished() { - finishedBuilds = append(finishedBuilds, build) - } - } - return finishedBuilds -} - -// GetBuilds gets all builds from this job on gcs, precomputes start/finish time of builds -// by parsing "Started.json" and "Finished.json" on gcs, could be very expensive if there are -// large number of builds -func (j *Job) GetBuilds() []Build { - buildIDs := j.GetBuildIDs() - builds := make([]Build, 0, len(buildIDs)) - for _, ID := range buildIDs { - builds = append(builds, *j.NewBuild(ID)) - } - return builds -} - -// GetBuildIDs gets all build IDs from this job on gcs, scans all direct child of gcs directory -// for job, keeps the ones that can be parsed as integer -func (j *Job) GetBuildIDs() []int { - var buildIDs []int - gcsBuildPaths, _ := client.ListDirectChildren(ctx, j.Bucket, j.StoragePath) - for _, gcsBuildPath := range gcsBuildPaths { - if buildID, err := getBuildIDFromBuildPath(gcsBuildPath); err == nil { - buildIDs = append(buildIDs, buildID) - } - } - return buildIDs -} - -// GetLatestBuilds get latest builds from gcs, sort by start time from newest to oldest, -// will return count number of builds -func (j *Job) GetLatestBuilds(count int) 
[]Build { - // The timestamp of gcs directories are not usable, - // as they are all set to '0001-01-01 00:00:00 +0000 UTC', - // so use 'started.json' creation date for latest builds - builds := j.GetFinishedBuilds() - sort.Slice(builds, func(i, j int) bool { - if builds[i].StartTime == nil { - return false - } - if builds[j].StartTime == nil { - return true - } - return *builds[i].StartTime > *builds[j].StartTime - }) - if len(builds) < count { - return builds - } - return builds[:count] -} - -// IsStarted check if build has started by looking at "started.json" file -func (b *Build) IsStarted() bool { - return client.Exists(ctx, BucketName, path.Join(b.StoragePath, StartedJSON)) -} - -// IsFinished check if build has finished by looking at "finished.json" file -func (b *Build) IsFinished() bool { - return client.Exists(ctx, BucketName, path.Join(b.StoragePath, FinishedJSON)) -} - -// GetStartTime gets started timestamp of a build, -// returning -1 if the build didn't start or if it failed to get the timestamp -func (b *Build) GetStartTime() (int64, error) { - var started Started - if err := unmarshalJSONFile(path.Join(b.StoragePath, StartedJSON), &started); err != nil { - return -1, err - } - return started.Timestamp, nil -} - -// GetFinishTime gets finished timestamp of a build, -// returning -1 if the build didn't finish or if it failed to get the timestamp -func (b *Build) GetFinishTime() (int64, error) { - var finished Finished - if err := unmarshalJSONFile(path.Join(b.StoragePath, FinishedJSON), &finished); err != nil { - return -1, err - } - return finished.Timestamp, nil -} - -// GetArtifacts gets gcs path for all artifacts of current build -func (b *Build) GetArtifacts() []string { - artifacts, _ := client.ListChildrenFiles(ctx, BucketName, b.GetArtifactsDir()) - return artifacts -} - -// GetArtifactsDir gets gcs path for artifacts of current build -func (b *Build) GetArtifactsDir() string { - return path.Join(b.StoragePath, ArtifactsDir) -} - -// GetBuildLogPath gets "build-log.txt" path for current build -func (b *Build) GetBuildLogPath() string { - return path.Join(b.StoragePath, BuildLog) -} - -// ReadFile reads given file of current build, -// relPath is the file path relative to build directory -func (b *Build) ReadFile(relPath string) ([]byte, error) { - return client.ReadObject(ctx, BucketName, path.Join(b.StoragePath, relPath)) -} - -// ParseLog parses the build log and returns the lines where the checkLog func does not return an empty slice, -// checkLog function should take in the log statement and return a part from that statement that should be in the log output. -func (b *Build) ParseLog(checkLog func(s []string) *string) ([]string, error) { - var logs []string - - f, err := client.NewReader(ctx, b.Bucket, b.GetBuildLogPath()) - if err != nil { - return logs, err - } - defer f.Close() - scanner := bufio.NewScanner(f) - for scanner.Scan() { - if s := checkLog(strings.Fields(scanner.Text())); s != nil { - logs = append(logs, *s) - } - } - return logs, nil -} - -// getBuildIDFromBuildPath digests gcs build path and return last portion of path -func getBuildIDFromBuildPath(buildPath string) (int, error) { - _, buildIDStr := path.Split(strings.TrimRight(buildPath, " /")) - return strconv.Atoi(buildIDStr) -} - -// unmarshalJSONFile reads a file from gcs, parses it with xml and write to v. -// v must be an arbitrary struct, slice, or string. 
-func unmarshalJSONFile(storagePath string, v interface{}) error { - contents, err := client.ReadObject(ctx, BucketName, storagePath) - if err != nil { - return err - } - return json.Unmarshal(contents, v) -} +var ( + // IsCI returns whether the current environment is a CI environment. + // + // Deprecated: use knative.dev/pkg/test/ci.IsCI + IsCI = ci.IsCI + + // GetLocalArtifactsDir gets the artifacts directory where prow looks for artifacts. + // By default, it will look at the env var ARTIFACTS. + // + // Deprecated: use knative.dev/pkg/test/ci.GetLocalArtifactsDir + GetLocalArtifactsDir = ci.GetLocalArtifactsDir +) diff --git a/test/prow/prow_test.go b/test/prow/prow_test.go deleted file mode 100644 index 67b47bc8a7..0000000000 --- a/test/prow/prow_test.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// prow_test.go contains unit tests for prow package - -package prow - -import ( - "testing" -) - -const ( - orgName = "test-org" - repoName = "test-repo" - invalidJobType = "invalid" - testJobName = "job_0" -) - -var jobPathTests = []struct { - in string - out string -}{ - {PeriodicJob, "logs/job_0"}, - {PostsubmitJob, "logs/job_0"}, - {PresubmitJob, "pr-logs/pull/test-org_test-repo/0/job_0"}, - {BatchJob, "pr-logs/pull/batch/job_0"}, -} - -func TestJobPath(t *testing.T) { - for _, testData := range jobPathTests { - job := NewJob(testJobName, testData.in, orgName, repoName, 0) - if job.StoragePath != testData.out { - t.Errorf("Expected '%s', actual '%s'", testData.out, job.StoragePath) - } - } -} - -func TestInvalidJobPath(t *testing.T) { - oldLogFatalf := logFatalf - defer func() { logFatalf = oldLogFatalf }() - - var exitString string - expectedExitString := "unknown job spec type: invalid" - logFatalf = func(string, ...interface{}) { - exitString = expectedExitString - } - - NewJob(testJobName, invalidJobType, orgName, repoName, 0) - if exitString != expectedExitString { - t.Fatalf("Expected: %s, actual: %s", exitString, expectedExitString) - } -} - -func TestIsCI(t *testing.T) { - t.Setenv("CI", "true") - if ic := IsCI(); !ic { - t.Fatal("Expected: true, actual: false") - } -} - -func TestGetArtifacts(t *testing.T) { - // Test we can read from the env var - t.Setenv("ARTIFACTS", "test") - v := GetLocalArtifactsDir() - if v != "test" { - t.Fatalf("Actual artifacts dir: '%s' and Expected: 'test'", v) - } - - // Test we can use the default - t.Setenv("ARTIFACTS", "") - v = GetLocalArtifactsDir() - if v != "artifacts" { - t.Fatalf("Actual artifacts dir: '%s' and Expected: 'artifacts'", v) - } -}
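
Note for reviewers: with the prow package reduced to deprecated aliases, downstream tests are expected to import the new ci package directly. Below is a minimal, illustrative sketch of what a migrated caller could look like; the test name and log messages are hypothetical, only the knative.dev/pkg/test/ci import path and the ci.IsCI / ci.GetLocalArtifactsDir helpers come from this patch.

package example_test

import (
	"testing"

	"knative.dev/pkg/test/ci"
)

// TestArtifactsLocation is not part of this patch; it only illustrates the
// replacements for the deprecated prow.IsCI and prow.GetLocalArtifactsDir.
func TestArtifactsLocation(t *testing.T) {
	// ci.IsCI reports whether the CI env var is set to "true".
	if !ci.IsCI() {
		t.Skip("not running in a CI environment (CI != true)")
	}

	// ci.GetLocalArtifactsDir reads ARTIFACTS and falls back to "artifacts"
	// when the variable is unset, matching the behavior removed from prow.go.
	dir := ci.GetLocalArtifactsDir()
	t.Logf("writing test artifacts under %q", dir)
}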